//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
13
#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>
49
50using namespace llvm;
51
52static cl::opt<bool> DisableI2pP2iOpt(
53 "disable-i2p-p2i-opt", cl::init(Val: false),
54 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60std::optional<TypeSize>
61AllocaInst::getAllocationSize(const DataLayout &DL) const {
62 TypeSize Size = DL.getTypeAllocSize(Ty: getAllocatedType());
63 if (isArrayAllocation()) {
64 auto *C = dyn_cast<ConstantInt>(Val: getArraySize());
65 if (!C)
66 return std::nullopt;
67 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
68 Size *= C->getZExtValue();
69 }
70 return Size;
71}
72
73std::optional<TypeSize>
74AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
75 std::optional<TypeSize> Size = getAllocationSize(DL);
76 if (Size)
77 return *Size * 8;
78 return std::nullopt;
79}
80
81//===----------------------------------------------------------------------===//
82// SelectInst Class
83//===----------------------------------------------------------------------===//
84
85/// areInvalidOperands - Return a string if the specified operands are invalid
86/// for a select operation, otherwise return null.
87const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
88 if (Op1->getType() != Op2->getType())
89 return "both values to select must have same type";
90
91 if (Op1->getType()->isTokenTy())
92 return "select values cannot have token type";
93
94 if (VectorType *VT = dyn_cast<VectorType>(Val: Op0->getType())) {
95 // Vector select.
96 if (VT->getElementType() != Type::getInt1Ty(C&: Op0->getContext()))
97 return "vector select condition element type must be i1";
98 VectorType *ET = dyn_cast<VectorType>(Val: Op1->getType());
99 if (!ET)
100 return "selected values for vector select must be vectors";
101 if (ET->getElementCount() != VT->getElementCount())
102 return "vector select requires selected vectors to have "
103 "the same vector length as select condition";
104 } else if (Op0->getType() != Type::getInt1Ty(C&: Op0->getContext())) {
105 return "select condition must be i1 or <n x i1>";
106 }
107 return nullptr;
108}
109
110//===----------------------------------------------------------------------===//
111// PHINode Class
112//===----------------------------------------------------------------------===//
113
114PHINode::PHINode(const PHINode &PN)
115 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
116 ReservedSpace(PN.getNumOperands()) {
117 allocHungoffUses(N: PN.getNumOperands());
118 std::copy(first: PN.op_begin(), last: PN.op_end(), result: op_begin());
119 copyIncomingBlocks(BBRange: make_range(x: PN.block_begin(), y: PN.block_end()));
120 SubclassOptionalData = PN.SubclassOptionalData;
121}
122
123// removeIncomingValue - Remove an incoming value. This is useful if a
124// predecessor basic block is deleted.
125Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
126 Value *Removed = getIncomingValue(i: Idx);
127
128 // Move everything after this operand down.
129 //
130 // FIXME: we could just swap with the end of the list, then erase. However,
131 // clients might not expect this to happen. The code as it is thrashes the
132 // use/def lists, which is kinda lame.
133 std::copy(first: op_begin() + Idx + 1, last: op_end(), result: op_begin() + Idx);
134 copyIncomingBlocks(BBRange: drop_begin(RangeOrContainer: blocks(), N: Idx + 1), ToIdx: Idx);
135
136 // Nuke the last value.
137 Op<-1>().set(nullptr);
138 setNumHungOffUseOperands(getNumOperands() - 1);
139
140 // If the PHI node is dead, because it has zero entries, nuke it now.
141 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
142 // If anyone is using this PHI, make them use a dummy value instead...
143 replaceAllUsesWith(V: PoisonValue::get(T: getType()));
144 eraseFromParent();
145 }
146 return Removed;
147}
148
149void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
150 bool DeletePHIIfEmpty) {
151 SmallDenseSet<unsigned> RemoveIndices;
152 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
153 if (Predicate(Idx))
154 RemoveIndices.insert(V: Idx);
155
156 if (RemoveIndices.empty())
157 return;
158
159 // Remove operands.
160 auto NewOpEnd = remove_if(Range: operands(), P: [&](Use &U) {
161 return RemoveIndices.contains(V: U.getOperandNo());
162 });
163 for (Use &U : make_range(x: NewOpEnd, y: op_end()))
164 U.set(nullptr);
165
166 // Remove incoming blocks.
167 (void)std::remove_if(first: const_cast<block_iterator>(block_begin()),
168 last: const_cast<block_iterator>(block_end()), pred: [&](BasicBlock *&BB) {
169 return RemoveIndices.contains(V: &BB - block_begin());
170 });
171
172 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
173
174 // If the PHI node is dead, because it has zero entries, nuke it now.
175 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
176 // If anyone is using this PHI, make them use a dummy value instead...
177 replaceAllUsesWith(V: PoisonValue::get(T: getType()));
178 eraseFromParent();
179 }
180}
181
182/// growOperands - grow operands - This grows the operand list in response
183/// to a push_back style of operation. This grows the number of ops by 1.5
184/// times.
185///
186void PHINode::growOperands() {
187 unsigned e = getNumOperands();
188 unsigned NumOps = e + e / 2;
189 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
190
191 ReservedSpace = NumOps;
192 growHungoffUses(N: ReservedSpace, /* IsPhi */ true);
193}
194
195/// hasConstantValue - If the specified PHI node always merges together the same
196/// value, return the value, otherwise return null.
197Value *PHINode::hasConstantValue() const {
198 // Exploit the fact that phi nodes always have at least one entry.
199 Value *ConstantValue = getIncomingValue(i: 0);
200 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
201 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
202 if (ConstantValue != this)
203 return nullptr; // Incoming values not all the same.
204 // The case where the first value is this PHI.
205 ConstantValue = getIncomingValue(i);
206 }
207 if (ConstantValue == this)
208 return UndefValue::get(T: getType());
209 return ConstantValue;
210}
211
212/// hasConstantOrUndefValue - Whether the specified PHI node always merges
213/// together the same value, assuming that undefs result in the same value as
214/// non-undefs.
215/// Unlike \ref hasConstantValue, this does not return a value because the
216/// unique non-undef incoming value need not dominate the PHI node.
217bool PHINode::hasConstantOrUndefValue() const {
218 Value *ConstantValue = nullptr;
219 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
220 Value *Incoming = getIncomingValue(i);
221 if (Incoming != this && !isa<UndefValue>(Val: Incoming)) {
222 if (ConstantValue && ConstantValue != Incoming)
223 return false;
224 ConstantValue = Incoming;
225 }
226 }
227 return true;
228}
229
230//===----------------------------------------------------------------------===//
231// LandingPadInst Implementation
232//===----------------------------------------------------------------------===//
233
234LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
235 const Twine &NameStr,
236 BasicBlock::iterator InsertBefore)
237 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
238 init(NumReservedValues, NameStr);
239}
240
241LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
242 const Twine &NameStr, Instruction *InsertBefore)
243 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
244 init(NumReservedValues, NameStr);
245}
246
247LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
248 const Twine &NameStr, BasicBlock *InsertAtEnd)
249 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
250 init(NumReservedValues, NameStr);
251}
252
253LandingPadInst::LandingPadInst(const LandingPadInst &LP)
254 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
255 LP.getNumOperands()),
256 ReservedSpace(LP.getNumOperands()) {
257 allocHungoffUses(N: LP.getNumOperands());
258 Use *OL = getOperandList();
259 const Use *InOL = LP.getOperandList();
260 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
261 OL[I] = InOL[I];
262
263 setCleanup(LP.isCleanup());
264}
265
266LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
267 const Twine &NameStr,
268 Instruction *InsertBefore) {
269 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
270}
271
272LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
273 const Twine &NameStr,
274 BasicBlock *InsertAtEnd) {
275 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
276}
277
278void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
279 ReservedSpace = NumReservedValues;
280 setNumHungOffUseOperands(0);
281 allocHungoffUses(N: ReservedSpace);
282 setName(NameStr);
283 setCleanup(false);
284}
285
286/// growOperands - grow operands - This grows the operand list in response to a
287/// push_back style of operation. This grows the number of ops by 2 times.
288void LandingPadInst::growOperands(unsigned Size) {
289 unsigned e = getNumOperands();
290 if (ReservedSpace >= e + Size) return;
291 ReservedSpace = (std::max(a: e, b: 1U) + Size / 2) * 2;
292 growHungoffUses(N: ReservedSpace);
293}
294
295void LandingPadInst::addClause(Constant *Val) {
296 unsigned OpNo = getNumOperands();
297 growOperands(Size: 1);
298 assert(OpNo < ReservedSpace && "Growing didn't work!");
299 setNumHungOffUseOperands(getNumOperands() + 1);
300 getOperandList()[OpNo] = Val;
301}
302
303//===----------------------------------------------------------------------===//
304// CallBase Implementation
305//===----------------------------------------------------------------------===//
306
307CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
308 BasicBlock::iterator InsertPt) {
309 switch (CB->getOpcode()) {
310 case Instruction::Call:
311 return CallInst::Create(CI: cast<CallInst>(Val: CB), Bundles, InsertPt);
312 case Instruction::Invoke:
313 return InvokeInst::Create(II: cast<InvokeInst>(Val: CB), Bundles, InsertPt);
314 case Instruction::CallBr:
315 return CallBrInst::Create(CBI: cast<CallBrInst>(Val: CB), Bundles, InsertPt);
316 default:
317 llvm_unreachable("Unknown CallBase sub-class!");
318 }
319}
320
321CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
322 Instruction *InsertPt) {
323 switch (CB->getOpcode()) {
324 case Instruction::Call:
325 return CallInst::Create(CI: cast<CallInst>(Val: CB), Bundles, InsertPt);
326 case Instruction::Invoke:
327 return InvokeInst::Create(II: cast<InvokeInst>(Val: CB), Bundles, InsertPt);
328 case Instruction::CallBr:
329 return CallBrInst::Create(CBI: cast<CallBrInst>(Val: CB), Bundles, InsertPt);
330 default:
331 llvm_unreachable("Unknown CallBase sub-class!");
332 }
333}
334
335CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
336 Instruction *InsertPt) {
337 SmallVector<OperandBundleDef, 2> OpDefs;
338 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
339 auto ChildOB = CI->getOperandBundleAt(Index: i);
340 if (ChildOB.getTagName() != OpB.getTag())
341 OpDefs.emplace_back(Args&: ChildOB);
342 }
343 OpDefs.emplace_back(Args&: OpB);
344 return CallBase::Create(CB: CI, Bundles: OpDefs, InsertPt);
345}
346
347
348Function *CallBase::getCaller() { return getParent()->getParent(); }
349
350unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
351 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
352 return cast<CallBrInst>(Val: this)->getNumIndirectDests() + 1;
353}
354
355bool CallBase::isIndirectCall() const {
356 const Value *V = getCalledOperand();
357 if (isa<Function>(Val: V) || isa<Constant>(Val: V))
358 return false;
359 return !isInlineAsm();
360}
361
362/// Tests if this call site must be tail call optimized. Only a CallInst can
363/// be tail call optimized.
364bool CallBase::isMustTailCall() const {
365 if (auto *CI = dyn_cast<CallInst>(Val: this))
366 return CI->isMustTailCall();
367 return false;
368}
369
370/// Tests if this call site is marked as a tail call.
371bool CallBase::isTailCall() const {
372 if (auto *CI = dyn_cast<CallInst>(Val: this))
373 return CI->isTailCall();
374 return false;
375}
376
377Intrinsic::ID CallBase::getIntrinsicID() const {
378 if (auto *F = getCalledFunction())
379 return F->getIntrinsicID();
380 return Intrinsic::not_intrinsic;
381}
382
383FPClassTest CallBase::getRetNoFPClass() const {
384 FPClassTest Mask = Attrs.getRetNoFPClass();
385
386 if (const Function *F = getCalledFunction())
387 Mask |= F->getAttributes().getRetNoFPClass();
388 return Mask;
389}
390
391FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
392 FPClassTest Mask = Attrs.getParamNoFPClass(ArgNo: i);
393
394 if (const Function *F = getCalledFunction())
395 Mask |= F->getAttributes().getParamNoFPClass(ArgNo: i);
396 return Mask;
397}
398
399std::optional<ConstantRange> CallBase::getRange() const {
400 const Attribute RangeAttr = getRetAttr(llvm::Attribute::Kind: Range);
401 if (RangeAttr.isValid())
402 return RangeAttr.getRange();
403 return std::nullopt;
404}
405
406bool CallBase::isReturnNonNull() const {
407 if (hasRetAttr(Attribute::NonNull))
408 return true;
409
410 if (getRetDereferenceableBytes() > 0 &&
411 !NullPointerIsDefined(F: getCaller(), AS: getType()->getPointerAddressSpace()))
412 return true;
413
414 return false;
415}
416
417Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
418 unsigned Index;
419
420 if (Attrs.hasAttrSomewhere(Kind, Index: &Index))
421 return getArgOperand(i: Index - AttributeList::FirstArgIndex);
422 if (const Function *F = getCalledFunction())
423 if (F->getAttributes().hasAttrSomewhere(Kind, Index: &Index))
424 return getArgOperand(i: Index - AttributeList::FirstArgIndex);
425
426 return nullptr;
427}
428
429/// Determine whether the argument or parameter has the given attribute.
430bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
431 assert(ArgNo < arg_size() && "Param index out of bounds!");
432
433 if (Attrs.hasParamAttr(ArgNo, Kind))
434 return true;
435
436 const Function *F = getCalledFunction();
437 if (!F)
438 return false;
439
440 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
441 return false;
442
443 // Take into account mod/ref by operand bundles.
444 switch (Kind) {
445 case Attribute::ReadNone:
446 return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
447 case Attribute::ReadOnly:
448 return !hasClobberingOperandBundles();
449 case Attribute::WriteOnly:
450 return !hasReadingOperandBundles();
451 default:
452 return true;
453 }
454}
455
456bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
457 Value *V = getCalledOperand();
458 if (auto *CE = dyn_cast<ConstantExpr>(Val: V))
459 if (CE->getOpcode() == BitCast)
460 V = CE->getOperand(i_nocapture: 0);
461
462 if (auto *F = dyn_cast<Function>(Val: V))
463 return F->getAttributes().hasFnAttr(Kind);
464
465 return false;
466}
467
468bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
469 Value *V = getCalledOperand();
470 if (auto *CE = dyn_cast<ConstantExpr>(Val: V))
471 if (CE->getOpcode() == BitCast)
472 V = CE->getOperand(i_nocapture: 0);
473
474 if (auto *F = dyn_cast<Function>(Val: V))
475 return F->getAttributes().hasFnAttr(Kind);
476
477 return false;
478}
479
480template <typename AK>
481Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
482 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
483 // getMemoryEffects() correctly combines memory effects from the call-site,
484 // operand bundles and function.
485 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
486 }
487
488 Value *V = getCalledOperand();
489 if (auto *CE = dyn_cast<ConstantExpr>(Val: V))
490 if (CE->getOpcode() == BitCast)
491 V = CE->getOperand(i_nocapture: 0);
492
493 if (auto *F = dyn_cast<Function>(Val: V))
494 return F->getAttributes().getFnAttr(Kind);
495
496 return Attribute();
497}
498
499template Attribute
500CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
501template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
502
503void CallBase::getOperandBundlesAsDefs(
504 SmallVectorImpl<OperandBundleDef> &Defs) const {
505 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
506 Defs.emplace_back(Args: getOperandBundleAt(Index: i));
507}
508
509CallBase::op_iterator
510CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
511 const unsigned BeginIndex) {
512 auto It = op_begin() + BeginIndex;
513 for (auto &B : Bundles)
514 It = std::copy(first: B.input_begin(), last: B.input_end(), result: It);
515
516 auto *ContextImpl = getContext().pImpl;
517 auto BI = Bundles.begin();
518 unsigned CurrentIndex = BeginIndex;
519
520 for (auto &BOI : bundle_op_infos()) {
521 assert(BI != Bundles.end() && "Incorrect allocation?");
522
523 BOI.Tag = ContextImpl->getOrInsertBundleTag(Tag: BI->getTag());
524 BOI.Begin = CurrentIndex;
525 BOI.End = CurrentIndex + BI->input_size();
526 CurrentIndex = BOI.End;
527 BI++;
528 }
529
530 assert(BI == Bundles.end() && "Incorrect allocation?");
531
532 return It;
533}
534
535CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
536 /// When there isn't many bundles, we do a simple linear search.
537 /// Else fallback to a binary-search that use the fact that bundles usually
538 /// have similar number of argument to get faster convergence.
539 if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
540 for (auto &BOI : bundle_op_infos())
541 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
542 return BOI;
543
544 llvm_unreachable("Did not find operand bundle for operand!");
545 }
546
547 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
548 assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
549 OpIdx < std::prev(bundle_op_info_end())->End &&
550 "The Idx isn't in the operand bundle");
551
552 /// We need a decimal number below and to prevent using floating point numbers
553 /// we use an intergal value multiplied by this constant.
554 constexpr unsigned NumberScaling = 1024;
555
556 bundle_op_iterator Begin = bundle_op_info_begin();
557 bundle_op_iterator End = bundle_op_info_end();
558 bundle_op_iterator Current = Begin;
559
560 while (Begin != End) {
561 unsigned ScaledOperandPerBundle =
562 NumberScaling * (std::prev(x: End)->End - Begin->Begin) / (End - Begin);
563 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
564 ScaledOperandPerBundle);
565 if (Current >= End)
566 Current = std::prev(x: End);
567 assert(Current < End && Current >= Begin &&
568 "the operand bundle doesn't cover every value in the range");
569 if (OpIdx >= Current->Begin && OpIdx < Current->End)
570 break;
571 if (OpIdx >= Current->End)
572 Begin = Current + 1;
573 else
574 End = Current;
575 }
576
577 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
578 "the operand bundle doesn't cover every value in the range");
579 return *Current;
580}
581
582CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
583 OperandBundleDef OB,
584 BasicBlock::iterator InsertPt) {
585 if (CB->getOperandBundle(ID))
586 return CB;
587
588 SmallVector<OperandBundleDef, 1> Bundles;
589 CB->getOperandBundlesAsDefs(Defs&: Bundles);
590 Bundles.push_back(Elt: OB);
591 return Create(CB, Bundles, InsertPt);
592}
593
594CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
595 OperandBundleDef OB,
596 Instruction *InsertPt) {
597 if (CB->getOperandBundle(ID))
598 return CB;
599
600 SmallVector<OperandBundleDef, 1> Bundles;
601 CB->getOperandBundlesAsDefs(Defs&: Bundles);
602 Bundles.push_back(Elt: OB);
603 return Create(CB, Bundles, InsertPt);
604}
605
606CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
607 BasicBlock::iterator InsertPt) {
608 SmallVector<OperandBundleDef, 1> Bundles;
609 bool CreateNew = false;
610
611 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
612 auto Bundle = CB->getOperandBundleAt(Index: I);
613 if (Bundle.getTagID() == ID) {
614 CreateNew = true;
615 continue;
616 }
617 Bundles.emplace_back(Args&: Bundle);
618 }
619
620 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
621}
622
623CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
624 Instruction *InsertPt) {
625 SmallVector<OperandBundleDef, 1> Bundles;
626 bool CreateNew = false;
627
628 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
629 auto Bundle = CB->getOperandBundleAt(Index: I);
630 if (Bundle.getTagID() == ID) {
631 CreateNew = true;
632 continue;
633 }
634 Bundles.emplace_back(Args&: Bundle);
635 }
636
637 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
638}
639
640bool CallBase::hasReadingOperandBundles() const {
641 // Implementation note: this is a conservative implementation of operand
642 // bundle semantics, where *any* non-assume operand bundle (other than
643 // ptrauth) forces a callsite to be at least readonly.
644 return hasOperandBundlesOtherThan(
645 IDs: {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
646 getIntrinsicID() != Intrinsic::assume;
647}
648
649bool CallBase::hasClobberingOperandBundles() const {
650 return hasOperandBundlesOtherThan(
651 IDs: {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
652 LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
653 getIntrinsicID() != Intrinsic::assume;
654}
655
656MemoryEffects CallBase::getMemoryEffects() const {
657 MemoryEffects ME = getAttributes().getMemoryEffects();
658 if (auto *Fn = dyn_cast<Function>(Val: getCalledOperand())) {
659 MemoryEffects FnME = Fn->getMemoryEffects();
660 if (hasOperandBundles()) {
661 // TODO: Add a method to get memory effects for operand bundles instead.
662 if (hasReadingOperandBundles())
663 FnME |= MemoryEffects::readOnly();
664 if (hasClobberingOperandBundles())
665 FnME |= MemoryEffects::writeOnly();
666 }
667 ME &= FnME;
668 }
669 return ME;
670}
671void CallBase::setMemoryEffects(MemoryEffects ME) {
672 addFnAttr(Attr: Attribute::getWithMemoryEffects(Context&: getContext(), ME));
673}
674
675/// Determine if the function does not access memory.
676bool CallBase::doesNotAccessMemory() const {
677 return getMemoryEffects().doesNotAccessMemory();
678}
679void CallBase::setDoesNotAccessMemory() {
680 setMemoryEffects(MemoryEffects::none());
681}
682
683/// Determine if the function does not access or only reads memory.
684bool CallBase::onlyReadsMemory() const {
685 return getMemoryEffects().onlyReadsMemory();
686}
687void CallBase::setOnlyReadsMemory() {
688 setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
689}
690
691/// Determine if the function does not access or only writes memory.
692bool CallBase::onlyWritesMemory() const {
693 return getMemoryEffects().onlyWritesMemory();
694}
695void CallBase::setOnlyWritesMemory() {
696 setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
697}
698
699/// Determine if the call can access memmory only using pointers based
700/// on its arguments.
701bool CallBase::onlyAccessesArgMemory() const {
702 return getMemoryEffects().onlyAccessesArgPointees();
703}
704void CallBase::setOnlyAccessesArgMemory() {
705 setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
706}
707
708/// Determine if the function may only access memory that is
709/// inaccessible from the IR.
710bool CallBase::onlyAccessesInaccessibleMemory() const {
711 return getMemoryEffects().onlyAccessesInaccessibleMem();
712}
713void CallBase::setOnlyAccessesInaccessibleMemory() {
714 setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
715}
716
717/// Determine if the function may only access memory that is
718/// either inaccessible from the IR or pointed to by its arguments.
719bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
720 return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
721}
722void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
723 setMemoryEffects(getMemoryEffects() &
724 MemoryEffects::inaccessibleOrArgMemOnly());
725}
726
727//===----------------------------------------------------------------------===//
728// CallInst Implementation
729//===----------------------------------------------------------------------===//
730
731void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
732 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
733 this->FTy = FTy;
734 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
735 "NumOperands not set up?");
736
737#ifndef NDEBUG
738 assert((Args.size() == FTy->getNumParams() ||
739 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
740 "Calling a function with bad signature!");
741
742 for (unsigned i = 0; i != Args.size(); ++i)
743 assert((i >= FTy->getNumParams() ||
744 FTy->getParamType(i) == Args[i]->getType()) &&
745 "Calling a function with a bad signature!");
746#endif
747
748 // Set operands in order of their index to match use-list-order
749 // prediction.
750 llvm::copy(Range&: Args, Out: op_begin());
751 setCalledOperand(Func);
752
753 auto It = populateBundleOperandInfos(Bundles, BeginIndex: Args.size());
754 (void)It;
755 assert(It + 1 == op_end() && "Should add up!");
756
757 setName(NameStr);
758}
759
760void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
761 this->FTy = FTy;
762 assert(getNumOperands() == 1 && "NumOperands not set up?");
763 setCalledOperand(Func);
764
765 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
766
767 setName(NameStr);
768}
769
770CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
771 BasicBlock::iterator InsertBefore)
772 : CallBase(Ty->getReturnType(), Instruction::Call,
773 OperandTraits<CallBase>::op_end(U: this) - 1, 1, InsertBefore) {
774 init(FTy: Ty, Func, NameStr: Name);
775}
776
777CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
778 Instruction *InsertBefore)
779 : CallBase(Ty->getReturnType(), Instruction::Call,
780 OperandTraits<CallBase>::op_end(U: this) - 1, 1, InsertBefore) {
781 init(FTy: Ty, Func, NameStr: Name);
782}
783
784CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
785 BasicBlock *InsertAtEnd)
786 : CallBase(Ty->getReturnType(), Instruction::Call,
787 OperandTraits<CallBase>::op_end(U: this) - 1, 1, InsertAtEnd) {
788 init(FTy: Ty, Func, NameStr: Name);
789}
790
791CallInst::CallInst(const CallInst &CI)
792 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
793 OperandTraits<CallBase>::op_end(U: this) - CI.getNumOperands(),
794 CI.getNumOperands()) {
795 setTailCallKind(CI.getTailCallKind());
796 setCallingConv(CI.getCallingConv());
797
798 std::copy(first: CI.op_begin(), last: CI.op_end(), result: op_begin());
799 std::copy(first: CI.bundle_op_info_begin(), last: CI.bundle_op_info_end(),
800 result: bundle_op_info_begin());
801 SubclassOptionalData = CI.SubclassOptionalData;
802}
803
804CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
805 BasicBlock::iterator InsertPt) {
806 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
807
808 auto *NewCI = CallInst::Create(Ty: CI->getFunctionType(), Func: CI->getCalledOperand(),
809 Args, Bundles: OpB, NameStr: CI->getName(), InsertBefore: InsertPt);
810 NewCI->setTailCallKind(CI->getTailCallKind());
811 NewCI->setCallingConv(CI->getCallingConv());
812 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
813 NewCI->setAttributes(CI->getAttributes());
814 NewCI->setDebugLoc(CI->getDebugLoc());
815 return NewCI;
816}
817
818CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
819 Instruction *InsertPt) {
820 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
821
822 auto *NewCI = CallInst::Create(Ty: CI->getFunctionType(), Func: CI->getCalledOperand(),
823 Args, Bundles: OpB, NameStr: CI->getName(), InsertBefore: InsertPt);
824 NewCI->setTailCallKind(CI->getTailCallKind());
825 NewCI->setCallingConv(CI->getCallingConv());
826 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
827 NewCI->setAttributes(CI->getAttributes());
828 NewCI->setDebugLoc(CI->getDebugLoc());
829 return NewCI;
830}
831
832// Update profile weight for call instruction by scaling it using the ratio
833// of S/T. The meaning of "branch_weights" meta data for call instruction is
834// transfered to represent call count.
835void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
836 if (T == 0) {
837 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
838 "div by 0. Ignoring. Likely the function "
839 << getParent()->getParent()->getName()
840 << " has 0 entry count, and contains call instructions "
841 "with non-zero prof info.");
842 return;
843 }
844 scaleProfData(I&: *this, S, T);
845}
846
847//===----------------------------------------------------------------------===//
848// InvokeInst Implementation
849//===----------------------------------------------------------------------===//
850
/// Initialize a freshly allocated invoke: record the callee's function type,
/// then fill the co-allocated operand list (arguments first, then bundle
/// operands, then the two destinations and the callee) and set the name.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  // The allocator must have reserved exactly the number of uses that
  // ComputeNumOperands predicts for these args and bundle inputs.
  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Argument count must match the callee signature; extra arguments are only
  // permitted for varargs function types.
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  // Each fixed argument must match the declared parameter type exactly.
  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Range&: Args, Out: op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  // Bundle operands sit directly after the arguments; the three slots left
  // over are the two destinations and the callee assigned above.
  auto It = populateBundleOperandInfos(Bundles, BeginIndex: Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}
885
/// Copy constructor: duplicates the operand list, the operand-bundle
/// bookkeeping, the calling convention, and the SubclassOptionalData bits
/// of the source invoke.
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               // Uses are co-allocated immediately before the instruction.
               OperandTraits<CallBase>::op_end(U: this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(first: II.op_begin(), last: II.op_end(), result: op_begin());
  std::copy(first: II.bundle_op_info_begin(), last: II.bundle_op_info_end(),
            result: bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}
896
/// Clone \p II with its operand bundles replaced by \p OpB, inserting the
/// clone before \p InsertPt. The callee, arguments, destinations, calling
/// convention, attributes, optional-data bits, and debug location are all
/// copied from the original.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               BasicBlock::iterator InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      Ty: II->getFunctionType(), Func: II->getCalledOperand(), IfNormal: II->getNormalDest(),
      IfException: II->getUnwindDest(), Args, Bundles: OpB, NameStr: II->getName(), InsertBefore: InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

/// Same as the overload above, but taking an Instruction insertion point
/// rather than a BasicBlock iterator.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      Ty: II->getFunctionType(), Func: II->getCalledOperand(), IfNormal: II->getNormalDest(),
      IfException: II->getUnwindDest(), Args, Bundles: OpB, NameStr: II->getName(), InsertBefore: InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}
924
/// Return the landingpad instruction of this invoke's unwind destination.
/// The cast asserts the IR invariant that the first non-PHI instruction of
/// an invoke's unwind block is a landingpad.
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(Val: getUnwindDest()->getFirstNonPHI());
}
928
929//===----------------------------------------------------------------------===//
930// CallBrInst Implementation
931//===----------------------------------------------------------------------===//
932
/// Initialize a freshly allocated callbr: record the callee's function type,
/// then fill the co-allocated operand list (arguments, then bundle operands,
/// then the default destination, the indirect destinations, and the callee)
/// and set the name.
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  // The allocator must have reserved exactly the number of uses that
  // ComputeNumOperands predicts for these args, dests, and bundle inputs.
  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Argument count must match the callee signature; extra arguments are only
  // permitted for varargs function types.
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  // Each fixed argument must match the declared parameter type exactly.
  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(first: Args.begin(), last: Args.end(), result: op_begin());
  // NumIndirectDests must be recorded before the setIndirectDest calls below
  // so destination indexing is computed correctly.
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, B: IndirectDests[i]);
  setCalledOperand(Fn);

  // Bundle operands sit directly after the arguments; the remaining slots
  // are the callee, the default dest, and the indirect dests set above.
  auto It = populateBundleOperandInfos(Bundles, BeginIndex: Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}
971
/// Copy constructor: duplicates the operand list, the operand-bundle
/// bookkeeping, the calling convention, the SubclassOptionalData bits, and
/// the indirect-destination count of the source callbr.
CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               // Uses are co-allocated immediately before the instruction.
               OperandTraits<CallBase>::op_end(U: this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(first: CBI.op_begin(), last: CBI.op_end(), result: op_begin());
  std::copy(first: CBI.bundle_op_info_begin(), last: CBI.bundle_op_info_end(),
            result: bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}
983
/// Clone \p CBI with its operand bundles replaced by \p OpB, inserting the
/// clone before \p InsertPt. The callee, arguments, destinations, calling
/// convention, attributes, optional-data bits, debug location, and
/// indirect-destination count are all copied from the original.
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               BasicBlock::iterator InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      Ty: CBI->getFunctionType(), Func: CBI->getCalledOperand(), DefaultDest: CBI->getDefaultDest(),
      IndirectDests: CBI->getIndirectDests(), Args, Bundles: OpB, NameStr: CBI->getName(), InsertBefore: InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

/// Same as the overload above, but taking an Instruction insertion point
/// rather than a BasicBlock iterator.
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      Ty: CBI->getFunctionType(), Func: CBI->getCalledOperand(), DefaultDest: CBI->getDefaultDest(),
      IndirectDests: CBI->getIndirectDests(), Args, Bundles: OpB, NameStr: CBI->getName(), InsertBefore: InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}
1013
1014//===----------------------------------------------------------------------===//
1015// ReturnInst Implementation
1016//===----------------------------------------------------------------------===//
1017
/// Copy constructor: a ret has either zero operands (void return) or one
/// (the returned value); copy whichever the source has.
ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(C&: RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(U: this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

/// Build a ret before an iterator position. `!!retVal` is 0 or 1, so a null
/// retVal allocates no operand slots (a `ret void`).
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(U: this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

/// Build a ret before an existing instruction; same operand-count trick.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(U: this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

/// Build a ret appended to the end of a block; same operand-count trick.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(U: this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

/// Build a `ret void` (zero operands) appended to the end of a block.
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(U: this), 0, InsertAtEnd) {}
1056
1057//===----------------------------------------------------------------------===//
1058// ResumeInst Implementation
1059//===----------------------------------------------------------------------===//
1060
/// Copy constructor: a resume always has exactly one operand, the exception
/// value being propagated.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(C&: RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(U: this), 1) {
  Op<0>() = RI.Op<0>();
}

/// Build a resume of \p Exn before an iterator position.
ResumeInst::ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(U: this), 1, InsertBefore) {
  Op<0>() = Exn;
}

/// Build a resume of \p Exn before an existing instruction.
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(U: this), 1, InsertBefore) {
  Op<0>() = Exn;
}

/// Build a resume of \p Exn appended to the end of a block.
ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(U: this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
1084
1085//===----------------------------------------------------------------------===//
1086// CleanupReturnInst Implementation
1087//===----------------------------------------------------------------------===//
1088
/// Copy constructor: a cleanupret has one operand (the cleanuppad) plus an
/// optional second operand for its unwind destination; copy both plus the
/// opaque subclass-data bits that record whether the unwind dest exists.
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(U: this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

/// Shared constructor tail: record the optional unwind destination in the
/// subclass-data bit, then wire up the operands. Op<1> exists only when the
/// caller allocated two operand slots (i.e. UnwindBB is non-null).
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

/// Build a cleanupret before an iterator position. \p Values is 1 or 2
/// depending on whether an unwind destination is present.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(U: this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

/// Build a cleanupret before an existing instruction.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(U: this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

/// Build a cleanupret appended to the end of a block.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(U: this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
1137
1138//===----------------------------------------------------------------------===//
1139// CatchReturnInst Implementation
1140//===----------------------------------------------------------------------===//
/// Shared constructor tail: a catchret always has exactly two operands, the
/// catchpad token and the successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

/// Copy constructor: duplicates both operands of the source catchret.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(C&: CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(U: this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

/// Build a catchret before an iterator position.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(U: this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

/// Build a catchret before an existing instruction.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(U: this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

/// Build a catchret appended to the end of a block.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(U: this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
1176
1177//===----------------------------------------------------------------------===//
1178// CatchSwitchInst Implementation
1179//===----------------------------------------------------------------------===//
1180
/// Build a catchswitch before an iterator position. Operands are hung off
/// (allocated separately, hence the null operand pointer here) because
/// handlers can be appended later. \p NumReservedValues counts handlers; we
/// add one slot for the unwind destination (if any) and one for the parent
/// pad token.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  // +1 reserves the slot for the parent pad operand (Op<0>).
  init(ParentPad, UnwindDest, NumReserved: NumReservedValues + 1);
  setName(NameStr);
}

/// Same as above, inserting before an existing instruction.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReserved: NumReservedValues + 1);
  setName(NameStr);
}

/// Same as above, appending to the end of a block.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReserved: NumReservedValues + 1);
  setName(NameStr);
}

/// Copy constructor: init() wires the parent pad and unwind dest; the loop
/// then copies the remaining (handler) operands, starting at index 1 since
/// Op<0> (the parent pad) was already set.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(ParentPad: CSI.getParentPad(), UnwindDest: CSI.getUnwindDest(), NumReserved: CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1226
/// Shared constructor tail: allocate the hung-off use array with
/// \p NumReservedValues slots and wire up the fixed operands — the parent
/// pad at index 0 and, when present, the unwind destination at index 1
/// (recorded via the UnwindDestField subclass-data bit).
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // One or two operands are live initially; the rest is reserved capacity
  // for handlers added later via addHandler().
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(N: ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1241
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Fast path: the hung-off allocation already has room for Size more.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // NOTE(review): "Size / 2" truncates to 0 for the common Size == 1 case,
  // so this reserves 2 * NumOperands then — effectively a doubling policy.
  // Looks intentional, but confirm before "fixing" the precedence.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(N: ReservedSpace);
}
1252
/// Append \p Handler as a new catchpad destination, growing the hung-off
/// operand storage if needed.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(Size: 1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  // Make the new slot live, then fill it.
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}
1260
/// Remove the handler at \p HI, compacting the operand list in place.
void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  // Shrink the live operand count; the slot stays reserved for reuse.
  setNumHungOffUseOperands(getNumOperands() - 1);
}
1271
1272//===----------------------------------------------------------------------===//
1273// FuncletPadInst Implementation
1274//===----------------------------------------------------------------------===//
/// Shared constructor tail: operand layout is the funclet arguments followed
/// by the parent pad token (set via setParentPad).
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Range&: Args, Out: op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

/// Copy constructor: duplicates all operands (args + parent pad). The
/// opcode is taken from the source so this serves both catchpad and
/// cleanuppad.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(U: this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(first: FPI.op_begin(), last: FPI.op_end(), result: op_begin());
  setParentPad(FPI.getParentPad());
}

/// Build a funclet pad before an iterator position. \p Values is the total
/// operand count (args + 1 for the parent pad).
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(U: this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

/// Build a funclet pad before an existing instruction.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(U: this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

/// Build a funclet pad appended to the end of a block.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(U: this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
1319
1320//===----------------------------------------------------------------------===//
1321// UnreachableInst Implementation
1322//===----------------------------------------------------------------------===//
1323
/// An unreachable terminator has no operands; these three constructors only
/// differ in how the insertion point is specified.
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}
1335
1336//===----------------------------------------------------------------------===//
1337// BranchInst Implementation
1338//===----------------------------------------------------------------------===//
1339
/// Debug-build sanity check: a conditional branch's condition must be i1.
/// (The whole body compiles away under NDEBUG since assert is a no-op.)
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}
1345
/// Unconditional branch before an iterator position. Branch operands are
/// addressed from the end (negative Op indices) so the taken destination is
/// always Op<-1> regardless of whether the branch is conditional.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

/// Unconditional branch before an existing instruction.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

/// Conditional branch before an iterator position. Layout from the end:
/// Op<-3> = condition, Op<-2> = false dest, Op<-1> = true dest.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

/// Conditional branch before an existing instruction.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

/// Unconditional branch appended to the end of a block.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

/// Conditional branch appended to the end of a block.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1409
/// Copy constructor: a branch has 1 operand (unconditional) or 3
/// (conditional); copy whichever layout the source uses.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(C&: BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(U: this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}
1423
/// Exchange the true and false destinations of a conditional branch,
/// keeping any branch-weight profile metadata consistent with the swap.
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  // Swapping the Uses (not the pointers) preserves use-list bookkeeping.
  Op<-1>().swap(RHS&: Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
1433
1434//===----------------------------------------------------------------------===//
1435// AllocaInst Implementation
1436//===----------------------------------------------------------------------===//
1437
1438static Value *getAISize(LLVMContext &Context, Value *Amt) {
1439 if (!Amt)
1440 Amt = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: 1);
1441 else {
1442 assert(!isa<BasicBlock>(Amt) &&
1443 "Passed basic block into allocation size parameter! Use other ctor");
1444 assert(Amt->getType()->isIntegerTy() &&
1445 "Allocation array size is not an integer!");
1446 }
1447 return Amt;
1448}
1449
/// Compute the alignment to use when an alloca is created without an
/// explicit one: the DataLayout's *preferred* alignment for the type.
/// Requires the insertion block to be attached to a Function (for Module /
/// DataLayout access).
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

/// Overload: derive the block from an iterator insertion point.
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock::iterator It) {
  return computeAllocaDefaultAlign(Ty, BB: It->getParent());
}

/// Overload: derive the block from an instruction insertion point.
static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, BB: I->getParent());
}
1466
/// Scalar alloca (array size 1) with default alignment, before an iterator.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

/// Scalar alloca (array size 1) with default alignment, before an
/// instruction.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

/// Scalar alloca (array size 1) with default alignment, at end of block.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

/// Array alloca with default (preferred) alignment, derived from the
/// insertion point's module DataLayout.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, It: InsertBefore), Name,
                 InsertBefore) {}

/// Array alloca with default alignment, before an instruction.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, I: InsertBefore), Name,
                 InsertBefore) {}

/// Array alloca with default alignment, at end of block.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, BB: InsertAtEnd), Name,
                 InsertAtEnd) {}

/// Fully-specified alloca (iterator form). The single operand is the array
/// size, normalized via getAISize (null -> i32 1); the instruction's own
/// type is a pointer into \p AddrSpace, while AllocatedType records \p Ty.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : UnaryInstruction(PointerType::get(ElementType: Ty, AddressSpace: AddrSpace), Alloca,
                       getAISize(Context&: Ty->getContext(), Amt: ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

/// Fully-specified alloca (instruction form).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(ElementType: Ty, AddressSpace: AddrSpace), Alloca,
                       getAISize(Context&: Ty->getContext(), Amt: ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

/// Fully-specified alloca (end-of-block form).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(ElementType: Ty, AddressSpace: AddrSpace), Alloca,
                       getAISize(Context&: Ty->getContext(), Amt: ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1528
1529
1530bool AllocaInst::isArrayAllocation() const {
1531 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: getOperand(i_nocapture: 0)))
1532 return !CI->isOne();
1533 return true;
1534}
1535
1536/// isStaticAlloca - Return true if this alloca is in the entry block of the
1537/// function and is a constant size. If so, the code generator will fold it
1538/// into the prolog/epilog code, so it is basically free.
1539bool AllocaInst::isStaticAlloca() const {
1540 // Must be constant size.
1541 if (!isa<ConstantInt>(Val: getArraySize())) return false;
1542
1543 // Must be in the entry block.
1544 const BasicBlock *Parent = getParent();
1545 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1546}
1547
1548//===----------------------------------------------------------------------===//
1549// LoadInst Implementation
1550//===----------------------------------------------------------------------===//
1551
/// Debug-build sanity check: the loaded-from operand must be a pointer.
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

/// Compute the alignment to use when a load/store is created without an
/// explicit one: the DataLayout's *ABI* alignment for the type (contrast
/// with computeAllocaDefaultAlign, which uses the preferred alignment).
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

/// Overload: derive the block from an iterator insertion point.
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock::iterator It) {
  return computeLoadStoreDefaultAlign(Ty, BB: It->getParent());
}

/// Overload: derive the block from an instruction insertion point.
static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, BB: I->getParent());
}
1573
/// Each delegating constructor layer fills in one more default:
/// non-volatile -> ABI default alignment -> non-atomic / system scope.
/// The three variants per layer differ only in insertion-point kind.

/// Non-volatile load, before an iterator.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

/// Non-volatile load, before an instruction.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

/// Non-volatile load, at end of block.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

/// Load with default (ABI) alignment, before an iterator.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, It: InsertBef), InsertBef) {}

/// Load with default alignment, before an instruction.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, I: InsertBef), InsertBef) {}

/// Load with default alignment, at end of block.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, BB: InsertAE), InsertAE) {}

/// Non-atomic load, before an iterator.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

/// Non-atomic load, before an instruction.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

/// Non-atomic load, at end of block.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

/// Fully-specified load (iterator form): Ptr is the single operand; the
/// volatile / alignment / ordering flags live in the instruction's
/// subclass data.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock::iterator InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Ordering: Order, SSID);
  AssertOK();
  setName(Name);
}

/// Fully-specified load (instruction form).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Ordering: Order, SSID);
  AssertOK();
  setName(Name);
}

/// Fully-specified load (end-of-block form).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Ordering: Order, SSID);
  AssertOK();
  setName(Name);
}
1648
1649//===----------------------------------------------------------------------===//
1650// StoreInst Implementation
1651//===----------------------------------------------------------------------===//
1652
/// Debug-build sanity check: both operands present and the address operand
/// (operand 1) is a pointer.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}
1658
1659StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1660 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1661
1662StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1663 : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1664
1665StoreInst::StoreInst(Value *val, Value *addr, BasicBlock::iterator InsertBefore)
1666 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1667
// Volatile-specified store; alignment defaults to the ABI alignment of the
// stored value's type at the insertion point.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(Ty: val->getType(), I: InsertBefore),
                InsertBefore) {}
1673
// Volatile-specified store at block end; default alignment derived from the
// enclosing block's module data layout.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(Ty: val->getType(), BB: InsertAtEnd),
                InsertAtEnd) {}
1679
// Iterator variant; dereferences the iterator to reuse the instruction-based
// default-alignment computation.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(Ty: val->getType(), I: &*InsertBefore),
                InsertBefore) {}
1685
// Aligned, non-atomic store (NotAtomic + System scope) before an instruction.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1690
// Aligned, non-atomic store appended at block end.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}
1695
// Aligned, non-atomic store at an iterator position.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1700
// Fully-specified store. A store produces no value, hence the void result
// type. Operand layout: <0> = stored value, <1> = destination pointer.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(U: this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Ordering: Order, SSID);
  AssertOK();
}
1714
// Fully-specified store appended at the end of a basic block.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(U: this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Ordering: Order, SSID);
  AssertOK();
}
1728
// Iterator variant: the instruction is first constructed unlinked (the base
// constructor call passes no insertion position) and is then inserted
// explicitly at the iterator before the final sanity check.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(U: this),
                  OperandTraits<StoreInst>::operands(this)) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Ordering: Order, SSID);
  insertBefore(BB&: *InsertBefore->getParent(), InsertPos: InsertBefore);
  AssertOK();
}
1743
1744//===----------------------------------------------------------------------===//
1745// AtomicCmpXchgInst Implementation
1746//===----------------------------------------------------------------------===//
1747
// Shared initializer for all AtomicCmpXchgInst constructors. Operand layout:
// <0> = pointer, <1> = expected (compare) value, <2> = replacement value.
// The {Ty, i1} result type is established by the constructors themselves.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
1767
// cmpxchg yields a {value, success-flag} pair, hence the {Ty, i1} struct
// result type. Inserted at an iterator position.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(
          StructType::get(elt1: Cmp->getType(), elts: Type::getInt1Ty(C&: Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(U: this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1780
// Same as the iterator variant, inserted before an existing instruction.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(elt1: Cmp->getType(), elts: Type::getInt1Ty(C&: Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(U: this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1793
// Same as above, appended at the end of a basic block.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(elt1: Cmp->getType(), elts: Type::getInt1Ty(C&: Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(U: this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1806
1807//===----------------------------------------------------------------------===//
1808// AtomicRMWInst Implementation
1809//===----------------------------------------------------------------------===//
1810
1811void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1812 Align Alignment, AtomicOrdering Ordering,
1813 SyncScope::ID SSID) {
1814 assert(Ordering != AtomicOrdering::NotAtomic &&
1815 "atomicrmw instructions can only be atomic.");
1816 assert(Ordering != AtomicOrdering::Unordered &&
1817 "atomicrmw instructions cannot be unordered.");
1818 Op<0>() = Ptr;
1819 Op<1>() = Val;
1820 setOperation(Operation);
1821 setOrdering(Ordering);
1822 setSyncScopeID(SSID);
1823 setAlignment(Alignment);
1824
1825 assert(getOperand(0) && getOperand(1) &&
1826 "All operands must be non-null!");
1827 assert(getOperand(0)->getType()->isPointerTy() &&
1828 "Ptr must have pointer type!");
1829 assert(Ordering != AtomicOrdering::NotAtomic &&
1830 "AtomicRMW instructions must be atomic!");
1831}
1832
// atomicrmw returns the old memory value, so the result type is Val's type.
// Inserted at an iterator position.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock::iterator InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(U: this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1842
// Same as the iterator variant, inserted before an existing instruction.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(U: this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1851
// Same as above, appended at the end of a basic block.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(U: this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1860
// Map an atomicrmw operation to its textual IR keyword (as printed in the
// assembly format). The switch is deliberately exhaustive with no default
// case so that adding a BinOp without updating this triggers a compiler
// -Wswitch warning; the unreachable below covers corrupted enum values.
StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::FMax:
    return "fmax";
  case AtomicRMWInst::FMin:
    return "fmin";
  case AtomicRMWInst::UIncWrap:
    return "uinc_wrap";
  case AtomicRMWInst::UDecWrap:
    return "udec_wrap";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}
1903
1904//===----------------------------------------------------------------------===//
1905// FenceInst Implementation
1906//===----------------------------------------------------------------------===//
1907
// fence has no operands and no result; only ordering and scope are recorded.
// Inserted at an iterator position.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1914
// Fence inserted before an existing instruction.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1922
// Fence appended at the end of a basic block.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1930
1931//===----------------------------------------------------------------------===//
1932// GetElementPtrInst Implementation
1933//===----------------------------------------------------------------------===//
1934
// Fill in the operands of a GEP whose operand storage was already sized by
// the constructor: operand 0 is the base pointer, operands 1..N the indices.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(Range&: IdxList, Out: op_begin() + 1);
  setName(Name);
}
1943
// Copy constructor: duplicates operands, source/result element types, and
// the optional flags (e.g. inbounds) carried in SubclassOptionalData.
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(U: this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(first: GEPI.op_begin(), last: GEPI.op_end(), result: op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}
1954
// Resolve the type selected by one GEP index Value, or null if the index is
// invalid for Ty. Structs require a valid constant field index; arrays and
// vectors accept any integer (or integer-vector) index.
Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Val: Ty)) {
    if (!Struct->indexValid(V: Idx))
      return nullptr;
    return Struct->getTypeAtIndex(V: Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Val: Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Val: Ty))
    return Vector->getElementType();
  return nullptr;
}
1969
// Same as the Value* overload but for a raw integer index; only structs can
// reject the index (out-of-range field number).
Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Val: Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(N: Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Val: Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Val: Ty))
    return Vector->getElementType();
  return nullptr;
}
1982
// Walk a GEP index list through Ty and return the final indexed type, or
// null if any index is invalid. The first index is skipped (slice(1))
// because it only offsets the base pointer; it does not select into Ty.
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}
1994
// Public wrapper over the templated walker for Value* index lists.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
1998
// Public wrapper over the templated walker for Constant* index lists.
Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
2003
// Public wrapper over the templated walker for raw integer index lists.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
2007
2008/// hasAllZeroIndices - Return true if all of the indices of this GEP are
2009/// zeros. If so, the result pointer and the first operand have the same
2010/// value, just potentially different types.
2011bool GetElementPtrInst::hasAllZeroIndices() const {
2012 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2013 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: getOperand(i_nocapture: i))) {
2014 if (!CI->isZero()) return false;
2015 } else {
2016 return false;
2017 }
2018 }
2019 return true;
2020}
2021
2022/// hasAllConstantIndices - Return true if all of the indices of this GEP are
2023/// constant integers. If so, the result pointer and the first operand have
2024/// a constant offset between them.
2025bool GetElementPtrInst::hasAllConstantIndices() const {
2026 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2027 if (!isa<ConstantInt>(Val: getOperand(i_nocapture: i)))
2028 return false;
2029 }
2030 return true;
2031}
2032
// Toggle the inbounds flag; state lives in the shared GEPOperator bits.
void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(Val: this)->setIsInBounds(B);
}
2036
// Read the inbounds flag via the shared GEPOperator view.
bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(Val: this)->isInBounds();
}
2040
// Accumulate this GEP's constant byte offset into Offset; returns false if
// any index is non-constant. Shared logic lives in GEPOperator.
bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(Val: this)->accumulateConstantOffset(DL, Offset);
}
2046
// Decompose this GEP into a constant offset plus per-variable scaled terms;
// shared logic lives in GEPOperator.
bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(Val: this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                 ConstantOffset);
}
2055
2056//===----------------------------------------------------------------------===//
2057// ExtractElementInst Implementation
2058//===----------------------------------------------------------------------===//
2059
// extractelement yields the vector's element type. Operands: <0> = vector,
// <1> = index. Inserted at an iterator position.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock::iterator InsertBef)
    : Instruction(
          cast<VectorType>(Val: Val->getType())->getElementType(), ExtractElement,
          OperandTraits<ExtractElementInst>::op_begin(U: this), 2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
2072
// Same as the iterator variant, inserted before an existing instruction.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val: Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(U: this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
2086
// Same as above, appended at the end of a basic block.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val: Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(U: this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
2101
2102bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
2103 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
2104 return false;
2105 return true;
2106}
2107
2108//===----------------------------------------------------------------------===//
2109// InsertElementInst Implementation
2110//===----------------------------------------------------------------------===//
2111
// insertelement returns the same vector type it consumes. Operands:
// <0> = vector, <1> = scalar element, <2> = index. Iterator position.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock::iterator InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(U: this), 3,
                  InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
2125
// Same as the iterator variant, inserted before an existing instruction.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(U: this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
2139
// Same as above, appended at the end of a basic block.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(U: this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
2154
2155bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
2156 const Value *Index) {
2157 if (!Vec->getType()->isVectorTy())
2158 return false; // First operand of insertelement must be vector type.
2159
2160 if (Elt->getType() != cast<VectorType>(Val: Vec->getType())->getElementType())
2161 return false;// Second operand of insertelement must be vector element type.
2162
2163 if (!Index->getType()->isIntegerTy())
2164 return false; // Third operand of insertelement must be i32.
2165 return true;
2166}
2167
2168//===----------------------------------------------------------------------===//
2169// ShuffleVectorInst Implementation
2170//===----------------------------------------------------------------------===//
2171
// Produce a poison stand-in of V's type, used as the second operand of
// single-input shufflevector constructors.
static Value *createPlaceholderForShuffleVector(Value *V) {
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(T: V->getType());
}
2176
// Single-input shuffle (constant mask): second operand is a poison
// placeholder of V1's type. Iterator position.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     BasicBlock::iterator InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
                        InsertBefore) {}
2181
// Single-input shuffle (constant mask), inserted before an instruction.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
                        InsertBefore) {}
2186
// Single-input shuffle (constant mask), appended at block end.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
                        InsertAtEnd) {}
2191
// Single-input shuffle (integer mask), at an iterator position.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     BasicBlock::iterator InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
                        InsertBefore) {}
2197
// Single-input shuffle (integer mask), inserted before an instruction.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
                        InsertBefore) {}
2203
// Single-input shuffle (integer mask), appended at block end.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
                        InsertAtEnd) {}
2208
// Two-input shuffle with a constant mask. The result keeps V1's element
// type but takes the mask's element count; the constant mask is expanded
// into the internal integer ShuffleMask representation.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(
          VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
                          EC: cast<VectorType>(Val: Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(U: this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(Mask: cast<Constant>(Val: Mask), Result&: MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
2227
// Two-input shuffle with a constant mask, inserted before an instruction.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
                          EC: cast<VectorType>(Val: Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(U: this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(Mask: cast<Constant>(Val: Mask), Result&: MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
2246
// Two-input shuffle with a constant mask, appended at block end.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
                          EC: cast<VectorType>(Val: Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(U: this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(Mask: cast<Constant>(Val: Mask), Result&: MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
2264
// Two-input shuffle with an integer mask. Result element count comes from
// the mask length; scalability is inherited from V1's type.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(
          VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
                          NumElements: Mask.size(), Scalable: isa<ScalableVectorType>(Val: V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(U: this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
2280
// Two-input shuffle with an integer mask, inserted before an instruction.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
                          NumElements: Mask.size(), Scalable: isa<ScalableVectorType>(Val: V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(U: this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
2296
// Two-input shuffle with an integer mask, appended at block end.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
                          NumElements: Mask.size(), Scalable: isa<ScalableVectorType>(Val: V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(U: this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
2312
// Swap the two vector operands and rewrite every mask lane to keep the
// shuffle's result unchanged: lanes referring to operand 0 are shifted to
// refer to operand 1 and vice versa. Only valid for fixed-width vectors
// (the FixedVectorType cast asserts that).
void ShuffleVectorInst::commute() {
  int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(Elt: i);
    if (MaskElt == PoisonMaskElem) {
      // Poison lanes stay poison regardless of operand order.
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(RHS&: Op<1>());
}
2330
// Validate a (V1, V2, integer-mask) shuffle: matching vector operand types,
// in-range lanes, and — for scalable vectors — only a splat-of-lane-0 or
// all-poison mask.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(Val: V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(Val: V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  // NOTE(review): Mask[0] below assumes a non-empty mask when V1 is
  // scalable — callers appear to guarantee at least one element; confirm.
  if (isa<ScalableVectorType>(Val: V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Range&: Mask))
      return false;

  return true;
}
2350
// Validate a (V1, V2, constant-mask) shuffle: the mask must be a vector of
// i32 of the same scalability as the inputs, and every defined lane must
// index into the concatenation of V1 and V2 (i.e. be < 2 * V1Size).
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Val: Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(Bitwidth: 32) ||
      isa<ScalableVectorType>(Val: MaskTy) != isa<ScalableVectorType>(Val: V1->getType()))
    return false;

  // Check to see if Mask is valid.
  // Undef and zeroinitializer masks are trivially valid for any input size.
  if (isa<UndefValue>(Val: Mask) || isa<ConstantAggregateZero>(Val: Mask))
    return true;

  // General constant vector: every lane must be undef or an in-range i32.
  if (const auto *MV = dyn_cast<ConstantVector>(Val: Mask)) {
    unsigned V1Size = cast<FixedVectorType>(Val: V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Val: Op)) {
        if (CI->uge(Num: V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Val: Op)) {
        return false;
      }
    }
    return true;
  }

  // Packed constant data: check each element against the combined size.
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Val: Mask)) {
    unsigned V1Size = cast<FixedVectorType>(Val: V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(Val: MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // Any other constant form (e.g. constant expressions) is rejected.
  return false;
}
2392
// Expand a constant shuffle mask into a vector of lane indices, using -1
// for undef lanes. Scalable masks are restricted to zeroinitializer (all 0)
// or undef (all -1); zeroinitializer is handled up front for both kinds.
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Val: Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Val: Mask)) {
    Result.resize(N: EC.getKnownMinValue(), NV: 0);
    return;
  }

  Result.reserve(N: EC.getKnownMinValue());

  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Val: Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(Args&: MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  // Packed constant data: read lanes directly as integers.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Val: Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(Elt: CDS->getElementAsInteger(i));
    return;
  }
  // General constant vector: undef lanes become -1, others their value.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(Elt: i);
    Result.push_back(Elt: isa<UndefValue>(Val: C) ? -1 :
                         cast<ConstantInt>(Val: C)->getZExtValue());
  }
}
2426
// Store the integer mask and keep the constant-vector form (used by bitcode
// serialization) in sync with it.
void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(in_start: Mask.begin(), in_end: Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, ResultTy: getType());
}
2431
// Rebuild the constant-vector representation of an integer shuffle mask.
// Scalable results only admit a uniform mask: all-zero becomes
// zeroinitializer, anything else (all-poison) becomes undef. Fixed-width
// masks map lane-by-lane, with PoisonMaskElem lanes becoming poison.
Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(C&: ResultTy->getContext());
  if (isa<ScalableVectorType>(Val: ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(ElementType: Int32Ty, NumElements: Mask.size(), Scalable: true);
    if (Mask[0] == 0)
      return Constant::getNullValue(Ty: VecTy);
    return UndefValue::get(T: VecTy);
  }
  SmallVector<Constant *, 16> MaskConst;
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
      MaskConst.push_back(Elt: PoisonValue::get(T: Int32Ty));
    else
      MaskConst.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: Elem));
  }
  return ConstantVector::get(V: MaskConst);
}
2451
// Return true if the mask draws defined lanes from only one of the two
// operands (lanes < NumOpElts are LHS, lanes >= NumOpElts are RHS).
// Undef (-1) lanes are ignored; touching both operands fails immediately.
static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int I : Mask) {
    if (I == -1)
      continue;
    assert(I >= 0 && I < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (I < NumOpElts);
    UsesRHS |= (I >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  // Allow for degenerate case: completely undef mask means neither source is used.
  return UsesLHS || UsesRHS;
}
2469
// Public wrapper: treat each operand as having NumSrcElts elements.
bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumOpElts: NumSrcElts);
}
2475
// True if the mask copies one operand through unchanged: single-source,
// and each defined lane i selects lane i of either operand.
static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}
2487
// Public wrapper: an identity mask must additionally be full-length
// (result width equal to operand width).
bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumOpElts: NumSrcElts);
}
2495
2496bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
2497 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2498 return false;
2499 if (!isSingleSourceMask(Mask, NumSrcElts))
2500 return false;
2501
2502 // The number of elements in the mask must be at least 2.
2503 if (NumSrcElts < 2)
2504 return false;
2505
2506 for (int I = 0, E = Mask.size(); I < E; ++I) {
2507 if (Mask[I] == -1)
2508 continue;
2509 if (Mask[I] != (NumSrcElts - 1 - I) &&
2510 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2511 return false;
2512 }
2513 return true;
2514}
2515
2516bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
2517 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2518 return false;
2519 if (!isSingleSourceMask(Mask, NumSrcElts))
2520 return false;
2521 for (int I = 0, E = Mask.size(); I < E; ++I) {
2522 if (Mask[I] == -1)
2523 continue;
2524 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2525 return false;
2526 }
2527 return true;
2528}
2529
2530bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
2531 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2532 return false;
2533 // Select is differentiated from identity. It requires using both sources.
2534 if (isSingleSourceMask(Mask, NumSrcElts))
2535 return false;
2536 for (int I = 0, E = Mask.size(); I < E; ++I) {
2537 if (Mask[I] == -1)
2538 continue;
2539 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2540 return false;
2541 }
2542 return true;
2543}
2544
2545bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
2546 // Example masks that will return true:
2547 // v1 = <a, b, c, d>
2548 // v2 = <e, f, g, h>
2549 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2550 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2551
2552 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2553 return false;
2554 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2555 int Sz = Mask.size();
2556 if (Sz < 2 || !isPowerOf2_32(Value: Sz))
2557 return false;
2558
2559 // 2. The first element of the mask must be either a 0 or a 1.
2560 if (Mask[0] != 0 && Mask[0] != 1)
2561 return false;
2562
2563 // 3. The difference between the first 2 elements must be equal to the
2564 // number of elements in the mask.
2565 if ((Mask[1] - Mask[0]) != NumSrcElts)
2566 return false;
2567
2568 // 4. The difference between consecutive even-numbered and odd-numbered
2569 // elements must be equal to 2.
2570 for (int I = 2; I < Sz; ++I) {
2571 int MaskEltVal = Mask[I];
2572 if (MaskEltVal == -1)
2573 return false;
2574 int MaskEltPrevVal = Mask[I - 2];
2575 if (MaskEltVal - MaskEltPrevVal != 2)
2576 return false;
2577 }
2578 return true;
2579}
2580
/// Match \p Mask as a vector splice: a run of NumSrcElts sequential elements
/// starting somewhere in the first operand and continuing into the second.
/// On success \p Index receives the starting offset within the first operand.
bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index) {
  // A splice mask must be exactly source-width.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  // An all-undef mask pins down no splice point; reject it.
  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
2614
2615bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2616 int NumSrcElts, int &Index) {
2617 // Must extract from a single source.
2618 if (!isSingleSourceMaskImpl(Mask, NumOpElts: NumSrcElts))
2619 return false;
2620
2621 // Must be smaller (else this is an Identity shuffle).
2622 if (NumSrcElts <= (int)Mask.size())
2623 return false;
2624
2625 // Find start of extraction, accounting that we may start with an UNDEF.
2626 int SubIndex = -1;
2627 for (int i = 0, e = Mask.size(); i != e; ++i) {
2628 int M = Mask[i];
2629 if (M < 0)
2630 continue;
2631 int Offset = (M % NumSrcElts) - i;
2632 if (0 <= SubIndex && SubIndex != Offset)
2633 return false;
2634 SubIndex = Offset;
2635 }
2636
2637 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2638 Index = SubIndex;
2639 return true;
2640 }
2641 return false;
2642}
2643
/// Match a mask that keeps one source in place (identity) and overwrites a
/// contiguous span of it with leading elements of the other source. On
/// success, \p NumSubElts receives the span's width and \p Index its start.
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumOpElts: NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(numBits: NumMaskElts);
  APInt Src0Elts = APInt::getZero(numBits: NumMaskElts);
  APInt Src1Elts = APInt::getZero(numBits: NumMaskElts);
  // Track whether each source's lanes all sit at their identity positions.
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(N: Src1Lo, M: NumSub1Elts);
    if (isIdentityMaskImpl(Mask: Sub1Mask, NumOpElts: NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(N: Src0Lo, M: NumSub0Elts);
    if (isIdentityMaskImpl(Mask: Sub0Mask, NumOpElts: NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2716
2717bool ShuffleVectorInst::isIdentityWithPadding() const {
2718 // FIXME: Not currently possible to express a shuffle mask for a scalable
2719 // vector for this case.
2720 if (isa<ScalableVectorType>(Val: getType()))
2721 return false;
2722
2723 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2724 int NumMaskElts = cast<FixedVectorType>(Val: getType())->getNumElements();
2725 if (NumMaskElts <= NumOpElts)
2726 return false;
2727
2728 // The first part of the mask must choose elements from exactly 1 source op.
2729 ArrayRef<int> Mask = getShuffleMask();
2730 if (!isIdentityMaskImpl(Mask, NumOpElts))
2731 return false;
2732
2733 // All extending must be with undef elements.
2734 for (int i = NumOpElts; i < NumMaskElts; ++i)
2735 if (Mask[i] != -1)
2736 return false;
2737
2738 return true;
2739}
2740
2741bool ShuffleVectorInst::isIdentityWithExtract() const {
2742 // FIXME: Not currently possible to express a shuffle mask for a scalable
2743 // vector for this case.
2744 if (isa<ScalableVectorType>(Val: getType()))
2745 return false;
2746
2747 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2748 int NumMaskElts = cast<FixedVectorType>(Val: getType())->getNumElements();
2749 if (NumMaskElts >= NumOpElts)
2750 return false;
2751
2752 return isIdentityMaskImpl(Mask: getShuffleMask(), NumOpElts);
2753}
2754
2755bool ShuffleVectorInst::isConcat() const {
2756 // Vector concatenation is differentiated from identity with padding.
2757 if (isa<UndefValue>(Val: Op<0>()) || isa<UndefValue>(Val: Op<1>()))
2758 return false;
2759
2760 // FIXME: Not currently possible to express a shuffle mask for a scalable
2761 // vector for this case.
2762 if (isa<ScalableVectorType>(Val: getType()))
2763 return false;
2764
2765 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2766 int NumMaskElts = cast<FixedVectorType>(Val: getType())->getNumElements();
2767 if (NumMaskElts != NumOpElts * 2)
2768 return false;
2769
2770 // Use the mask length rather than the operands' vector lengths here. We
2771 // already know that the shuffle returns a vector twice as long as the inputs,
2772 // and neither of the inputs are undef vectors. If the mask picks consecutive
2773 // elements from both inputs, then this is a concatenation of the inputs.
2774 return isIdentityMaskImpl(Mask: getShuffleMask(), NumOpElts: NumMaskElts);
2775}
2776
2777static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2778 int ReplicationFactor, int VF) {
2779 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2780 "Unexpected mask size.");
2781
2782 for (int CurrElt : seq(Size: VF)) {
2783 ArrayRef<int> CurrSubMask = Mask.take_front(N: ReplicationFactor);
2784 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2785 "Run out of mask?");
2786 Mask = Mask.drop_front(N: ReplicationFactor);
2787 if (!all_of(Range&: CurrSubMask, P: [CurrElt](int MaskElt) {
2788 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2789 }))
2790 return false;
2791 }
2792 assert(Mask.empty() && "Did not consume the whole mask?");
2793
2794 return true;
2795}
2796
/// Match \p Mask as a replication mask <0,...,0,1,...,1,...,VF-1,...,VF-1>,
/// reporting the discovered \p ReplicationFactor and source width \p VF.
/// With undef lanes present, the largest viable factor is preferred.
bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Range&: Mask, Element: PoisonMaskElem)) {
    // The leading run of zeros uniquely determines the factor.
    ReplicationFactor =
        Mask.take_while(Pred: [](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(a: Largest, b: MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(C: seq_inclusive<unsigned>(Begin: 1, End: Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, ReplicationFactor: PossibleReplicationFactor,
                                     VF: PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
2842
2843bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2844 int &VF) const {
2845 // Not possible to express a shuffle mask for a scalable vector for this
2846 // case.
2847 if (isa<ScalableVectorType>(Val: getType()))
2848 return false;
2849
2850 VF = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2851 if (ShuffleMask.size() % VF != 0)
2852 return false;
2853 ReplicationFactor = ShuffleMask.size() / VF;
2854
2855 return isReplicationMaskWithParams(Mask: ShuffleMask, ReplicationFactor, VF);
2856}
2857
2858bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2859 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2860 Mask.size() % VF != 0)
2861 return false;
2862 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2863 ArrayRef<int> SubMask = Mask.slice(N: K, M: VF);
2864 if (all_of(Range&: SubMask, P: [](int Idx) { return Idx == PoisonMaskElem; }))
2865 continue;
2866 SmallBitVector Used(VF, false);
2867 for (int Idx : SubMask) {
2868 if (Idx != PoisonMaskElem && Idx < VF)
2869 Used.set(Idx);
2870 }
2871 if (!Used.all())
2872 return false;
2873 }
2874 return true;
2875}
2876
/// Return true if this shuffle reads from a single source and every
/// VF-wide chunk of its mask uses each source lane (one-use property).
/// NOTE: the previous comment here ("is a replication mask") was a
/// copy-paste error describing a different predicate.
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(Val: getType()))
    return false;
  if (!isSingleSourceMask(Mask: ShuffleMask, NumSrcElts: VF))
    return false;

  return isOneUseSingleSourceMask(Mask: ShuffleMask, VF);
}
2888
2889bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2890 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(Val: getOperand(i_nocapture: 0)->getType());
2891 // shuffle_vector can only interleave fixed length vectors - for scalable
2892 // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2893 if (!OpTy)
2894 return false;
2895 unsigned OpNumElts = OpTy->getNumElements();
2896
2897 return isInterleaveMask(Mask: ShuffleMask, Factor, NumInputElts: OpNumElts * 2);
2898}
2899
/// Check whether \p Mask interleaves \p Factor sequential lane runs
/// (pattern <x, y, z, x+1, y+1, z+1, ...>). On success, each run's first
/// source index is written into \p StartIndexes. Undef lanes are tolerated
/// so long as the defined lanes of each run stay consecutive.
bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(Value: LaneLen))
    return false;

  StartIndexes.resize(N: Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    // SavedLaneValue is only read when SavedNoUndefs > 0, which implies it
    // was assigned first.
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    // Early break above means the run was not sequential: reject the mask.
    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
2980
2981/// Check if the mask is a DE-interleave mask of the given factor
2982/// \p Factor like:
2983/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2984bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
2985 unsigned Factor,
2986 unsigned &Index) {
2987 // Check all potential start indices from 0 to (Factor - 1).
2988 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2989 unsigned I = 0;
2990
2991 // Check that elements are in ascending order by Factor. Ignore undef
2992 // elements.
2993 for (; I < Mask.size(); I++)
2994 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2995 break;
2996
2997 if (I == Mask.size()) {
2998 Index = Idx;
2999 return true;
3000 }
3001 }
3002
3003 return false;
3004}
3005
3006/// Try to lower a vector shuffle as a bit rotation.
3007///
3008/// Look for a repeated rotation pattern in each sub group.
3009/// Returns an element-wise left bit rotation amount or -1 if failed.
3010static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
3011 int NumElts = Mask.size();
3012 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
3013
3014 int RotateAmt = -1;
3015 for (int i = 0; i != NumElts; i += NumSubElts) {
3016 for (int j = 0; j != NumSubElts; ++j) {
3017 int M = Mask[i + j];
3018 if (M < 0)
3019 continue;
3020 if (M < i || M >= i + NumSubElts)
3021 return -1;
3022 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
3023 if (0 <= RotateAmt && Offset != RotateAmt)
3024 return -1;
3025 RotateAmt = Offset;
3026 }
3027 }
3028 return RotateAmt;
3029}
3030
3031bool ShuffleVectorInst::isBitRotateMask(
3032 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
3033 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
3034 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
3035 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
3036 if (EltRotateAmt < 0)
3037 continue;
3038 RotateAmt = EltRotateAmt * EltSizeInBits;
3039 return true;
3040 }
3041
3042 return false;
3043}
3044
3045//===----------------------------------------------------------------------===//
3046// InsertValueInst Class
3047//===----------------------------------------------------------------------===//
3048
/// Shared constructor logic: record the aggregate, the value to insert, and
/// the index path, then name the instruction.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  // The index path is stored out-of-line, not as operands.
  Indices.append(in_start: Idxs.begin(), in_end: Idxs.end());
  setName(Name);
}
3067
/// Copy constructor: duplicates both operands, the index path, and the
/// optional-data flags of \p IVI.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(U: this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(i_nocapture: 0);
  Op<1>() = IVI.getOperand(i_nocapture: 1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}
3076
3077//===----------------------------------------------------------------------===//
3078// ExtractValueInst Class
3079//===----------------------------------------------------------------------===//
3080
/// Shared constructor logic: record the index path and instruction name.
/// (The single aggregate operand is set by the constructor itself.)
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(in_start: Idxs.begin(), in_end: Idxs.end());
  setName(Name);
}
3091
/// Copy constructor: duplicates the aggregate operand, the index path, and
/// the optional-data flags of \p EVI.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(i_nocapture: 0)),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}
3097
3098// getIndexedType - Returns the type of the element that would be extracted
3099// with an extractvalue instruction with the specified parameters.
3100//
3101// A null type is returned if the indices are invalid for the specified
3102// pointer type.
3103//
3104Type *ExtractValueInst::getIndexedType(Type *Agg,
3105 ArrayRef<unsigned> Idxs) {
3106 for (unsigned Index : Idxs) {
3107 // We can't use CompositeType::indexValid(Index) here.
3108 // indexValid() always returns true for arrays because getelementptr allows
3109 // out-of-bounds indices. Since we don't allow those for extractvalue and
3110 // insertvalue we need to check array indexing manually.
3111 // Since the only other types we can index into are struct types it's just
3112 // as easy to check those manually as well.
3113 if (ArrayType *AT = dyn_cast<ArrayType>(Val: Agg)) {
3114 if (Index >= AT->getNumElements())
3115 return nullptr;
3116 Agg = AT->getElementType();
3117 } else if (StructType *ST = dyn_cast<StructType>(Val: Agg)) {
3118 if (Index >= ST->getNumElements())
3119 return nullptr;
3120 Agg = ST->getElementType(N: Index);
3121 } else {
3122 // Not a valid type to index into.
3123 return nullptr;
3124 }
3125 }
3126 return const_cast<Type*>(Agg);
3127}
3128
3129//===----------------------------------------------------------------------===//
3130// UnaryOperator Class
3131//===----------------------------------------------------------------------===//
3132
/// Construct a unary operator and insert it before the given iterator.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
                             const Twine &Name,
                             BasicBlock::iterator InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK(); // Type-check the new instruction (debug builds only).
}
3141
/// Construct a unary operator and insert it before \p InsertBefore.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK(); // Type-check the new instruction (debug builds only).
}
3150
/// Construct a unary operator and append it to \p InsertAtEnd.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK(); // Type-check the new instruction (debug builds only).
}
3159
/// Create a unary operator; the result type matches the operand type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
                                     BasicBlock::iterator InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
3164
/// Create a unary operator; the result type matches the operand type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
3170
/// Create a detached unary operator, then append it to \p InsertAtEnd.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  Res->insertInto(ParentBB: InsertAtEnd, It: InsertAtEnd->end());
  return Res;
}
3178
/// Sanity-check operand/result types for this unary opcode. Compiles to
/// nothing in release (NDEBUG) builds.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(i_nocapture: 0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    // FNeg requires a floating-point (or FP vector) operand of the same type.
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
3195
3196//===----------------------------------------------------------------------===//
3197// BinaryOperator Class
3198//===----------------------------------------------------------------------===//
3199
/// Construct a binary operator and insert it before the given iterator.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                               const Twine &Name,
                               BasicBlock::iterator InsertBefore)
  : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(U: this),
                OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK(); // Type-check the new instruction (debug builds only).
}
3210
/// Construct a binary operator and insert it before \p InsertBefore.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(U: this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK(); // Type-check the new instruction (debug builds only).
}
3223
/// Construct a binary operator and append it to \p InsertAtEnd.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(U: this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK(); // Type-check the new instruction (debug builds only).
}
3236
/// Sanity-check operand/result types for this binary opcode. Compiles to
/// (almost) nothing in release (NDEBUG) builds.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(i_nocapture: 0), *RHS = getOperand(i_nocapture: 1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  // Integer arithmetic: operands and result must be int or int vector.
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  // Floating-point arithmetic: operands and result must be FP or FP vector.
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  // Shifts: integral operands only.
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  // Bitwise logic: integral operands only.
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
3304
/// Create a binary operator; the result type matches the operands' type.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock::iterator InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
3312
/// Create a binary operator; the result type matches the operands' type.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
3320
/// Create a detached binary operator, then append it to \p InsertAtEnd.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  Res->insertInto(ParentBB: InsertAtEnd, It: InsertAtEnd->end());
  return Res;
}
3328
/// Create integer negation, lowered as (0 - Op).
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock::iterator InsertBefore) {
  Value *Zero = ConstantInt::get(Ty: Op->getType(), V: 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}
3335
/// Create integer negation, lowered as (0 - Op), appended to \p InsertAtEnd.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Ty: Op->getType(), V: 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}
3343
/// Create negation carrying the no-signed-wrap flag: (0 -nsw Op).
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Ty: Op->getType(), V: 0);
  return BinaryOperator::CreateNSWSub(V1: Zero, V2: Op, Name, I: InsertBefore);
}
3349
/// Create negation carrying the no-signed-wrap flag, appended to a block.
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Ty: Op->getType(), V: 0);
  return BinaryOperator::CreateNSWSub(V1: Zero, V2: Op, Name, BB: InsertAtEnd);
}
3355
/// Create bitwise complement, lowered as (Op ^ all-ones).
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock::iterator InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Ty: Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}
3362
/// Create bitwise complement, lowered as (Op ^ all-ones).
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Ty: Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}
3369
/// Create bitwise complement, lowered as (Op ^ all-ones), appended to a block.
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Ty: Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}
3376
3377// Exchange the two operands to this instruction. This instruction is safe to
3378// use on any binary instruction and does not modify the semantics of the
3379// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
3380// is changed.
3381bool BinaryOperator::swapOperands() {
3382 if (!isCommutative())
3383 return true; // Can't commute operands
3384 Op<0>().swap(RHS&: Op<1>());
3385 return false;
3386}
3387
3388//===----------------------------------------------------------------------===//
3389// FPMathOperator Class
3390//===----------------------------------------------------------------------===//
3391
3392float FPMathOperator::getFPAccuracy() const {
3393 const MDNode *MD =
3394 cast<Instruction>(Val: this)->getMetadata(KindID: LLVMContext::MD_fpmath);
3395 if (!MD)
3396 return 0.0;
3397 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD: MD->getOperand(I: 0));
3398 return Accuracy->getValueAPF().convertToFloat();
3399}
3400
3401//===----------------------------------------------------------------------===//
3402// CastInst Class
3403//===----------------------------------------------------------------------===//
3404
3405// Just determine if this cast only deals with integral->integral conversion.
3406bool CastInst::isIntegerCast() const {
3407 switch (getOpcode()) {
3408 default: return false;
3409 case Instruction::ZExt:
3410 case Instruction::SExt:
3411 case Instruction::Trunc:
3412 return true;
3413 case Instruction::BitCast:
3414 return getOperand(i_nocapture: 0)->getType()->isIntegerTy() &&
3415 getType()->isIntegerTy();
3416 }
3417}
3418
3419/// This function determines if the CastInst does not require any bits to be
3420/// changed in order to effect the cast. Essentially, it identifies cases where
3421/// no code gen is necessary for the cast, hence the name no-op cast. For
3422/// example, the following are all no-op casts:
3423/// # bitcast i32* %x to i8*
3424/// # bitcast <2 x i32> %x to <4 x i16>
3425/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
3426/// Determine if the described cast is a no-op.
3427bool CastInst::isNoopCast(Instruction::CastOps Opcode,
3428 Type *SrcTy,
3429 Type *DestTy,
3430 const DataLayout &DL) {
3431 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3432 switch (Opcode) {
3433 default: llvm_unreachable("Invalid CastOp");
3434 case Instruction::Trunc:
3435 case Instruction::ZExt:
3436 case Instruction::SExt:
3437 case Instruction::FPTrunc:
3438 case Instruction::FPExt:
3439 case Instruction::UIToFP:
3440 case Instruction::SIToFP:
3441 case Instruction::FPToUI:
3442 case Instruction::FPToSI:
3443 case Instruction::AddrSpaceCast:
3444 // TODO: Target informations may give a more accurate answer here.
3445 return false;
3446 case Instruction::BitCast:
3447 return true; // BitCast never modifies bits.
3448 case Instruction::PtrToInt:
3449 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3450 DestTy->getScalarSizeInBits();
3451 case Instruction::IntToPtr:
3452 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3453 SrcTy->getScalarSizeInBits();
3454 }
3455}
3456
3457bool CastInst::isNoopCast(const DataLayout &DL) const {
3458 return isNoopCast(Opcode: getOpcode(), SrcTy: getOperand(i_nocapture: 0)->getType(), DestTy: getType(), DL);
3459}
3460
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
///
/// The *IntPtrTy parameters are the pointer-sized integer types for the
/// corresponding address spaces (may be null when unknown); they are only
/// consulted by the ptrtoint/inttoptr cases below.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // Matrix entries: 0 = categorically disallowed, 99 = combination cannot
  // occur for a valid cast pair, any other value selects a case in the
  // switch below.
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(Val: SrcTy) != isa<VectorType>(Val: MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(Val: MidTy) != isa<VectorType>(Val: DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // Bail out if the ptrtoint/inttoptr roundtrip optimization has been
      // disabled on the command line.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3681
3682CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3683 const Twine &Name,
3684 BasicBlock::iterator InsertBefore) {
3685 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3686 // Construct and return the appropriate CastInst subclass
3687 switch (op) {
3688 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3689 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3690 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3691 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3692 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3693 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3694 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3695 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3696 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3697 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3698 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3699 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3700 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3701 default: llvm_unreachable("Invalid opcode provided");
3702 }
3703}
3704
3705CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3706 const Twine &Name, Instruction *InsertBefore) {
3707 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3708 // Construct and return the appropriate CastInst subclass
3709 switch (op) {
3710 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3711 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3712 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3713 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3714 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3715 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3716 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3717 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3718 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3719 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3720 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3721 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3722 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3723 default: llvm_unreachable("Invalid opcode provided");
3724 }
3725}
3726
3727CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3728 const Twine &Name, BasicBlock *InsertAtEnd) {
3729 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3730 // Construct and return the appropriate CastInst subclass
3731 switch (op) {
3732 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3733 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3734 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3735 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3736 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3737 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3738 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3739 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3740 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3741 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3742 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3743 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3744 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3745 default: llvm_unreachable("Invalid opcode provided");
3746 }
3747}
3748
3749CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3750 BasicBlock::iterator InsertBefore) {
3751 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3752 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3753 return Create(op: Instruction::ZExt, S, Ty, Name, InsertBefore);
3754}
3755
3756CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3757 const Twine &Name,
3758 Instruction *InsertBefore) {
3759 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3760 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3761 return Create(op: Instruction::ZExt, S, Ty, Name, InsertBefore);
3762}
3763
3764CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3765 const Twine &Name,
3766 BasicBlock *InsertAtEnd) {
3767 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3768 return Create(op: Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3769 return Create(op: Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3770}
3771
3772CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3773 BasicBlock::iterator InsertBefore) {
3774 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3775 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3776 return Create(op: Instruction::SExt, S, Ty, Name, InsertBefore);
3777}
3778
3779CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3780 const Twine &Name,
3781 Instruction *InsertBefore) {
3782 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3783 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3784 return Create(op: Instruction::SExt, S, Ty, Name, InsertBefore);
3785}
3786
3787CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3788 const Twine &Name,
3789 BasicBlock *InsertAtEnd) {
3790 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3791 return Create(op: Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3792 return Create(op: Instruction::SExt, S, Ty, Name, InsertAtEnd);
3793}
3794
3795CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,
3796 BasicBlock::iterator InsertBefore) {
3797 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3798 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3799 return Create(op: Instruction::Trunc, S, Ty, Name, InsertBefore);
3800}
3801
3802CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3803 const Twine &Name,
3804 Instruction *InsertBefore) {
3805 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3806 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3807 return Create(op: Instruction::Trunc, S, Ty, Name, InsertBefore);
3808}
3809
3810CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3811 const Twine &Name,
3812 BasicBlock *InsertAtEnd) {
3813 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3814 return Create(op: Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3815 return Create(op: Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3816}
3817
3818CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3819 const Twine &Name,
3820 BasicBlock *InsertAtEnd) {
3821 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3822 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3823 "Invalid cast");
3824 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3825 assert((!Ty->isVectorTy() ||
3826 cast<VectorType>(Ty)->getElementCount() ==
3827 cast<VectorType>(S->getType())->getElementCount()) &&
3828 "Invalid cast");
3829
3830 if (Ty->isIntOrIntVectorTy())
3831 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3832
3833 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3834}
3835
3836/// Create a BitCast or a PtrToInt cast instruction
3837CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3838 BasicBlock::iterator InsertBefore) {
3839 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3840 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3841 "Invalid cast");
3842 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3843 assert((!Ty->isVectorTy() ||
3844 cast<VectorType>(Ty)->getElementCount() ==
3845 cast<VectorType>(S->getType())->getElementCount()) &&
3846 "Invalid cast");
3847
3848 if (Ty->isIntOrIntVectorTy())
3849 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3850
3851 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3852}
3853
3854/// Create a BitCast or a PtrToInt cast instruction
3855CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3856 Instruction *InsertBefore) {
3857 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3858 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3859 "Invalid cast");
3860 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3861 assert((!Ty->isVectorTy() ||
3862 cast<VectorType>(Ty)->getElementCount() ==
3863 cast<VectorType>(S->getType())->getElementCount()) &&
3864 "Invalid cast");
3865
3866 if (Ty->isIntOrIntVectorTy())
3867 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3868
3869 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3870}
3871
3872CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3873 Value *S, Type *Ty,
3874 const Twine &Name,
3875 BasicBlock *InsertAtEnd) {
3876 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3877 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3878
3879 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3880 return Create(op: Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3881
3882 return Create(op: Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3883}
3884
3885CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3886 Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore) {
3887 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3888 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3889
3890 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3891 return Create(op: Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3892
3893 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3894}
3895
3896CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3897 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore) {
3898 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3899 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3900
3901 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3902 return Create(op: Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3903
3904 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3905}
3906
3907CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3908 const Twine &Name,
3909 BasicBlock::iterator InsertBefore) {
3910 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3911 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3912 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3913 return Create(op: Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3914
3915 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3916}
3917
3918CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3919 const Twine &Name,
3920 Instruction *InsertBefore) {
3921 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3922 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3923 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3924 return Create(op: Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3925
3926 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3927}
3928
3929CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,
3930 const Twine &Name,
3931 BasicBlock::iterator InsertBefore) {
3932 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3933 "Invalid integer cast");
3934 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3935 unsigned DstBits = Ty->getScalarSizeInBits();
3936 Instruction::CastOps opcode =
3937 (SrcBits == DstBits ? Instruction::BitCast :
3938 (SrcBits > DstBits ? Instruction::Trunc :
3939 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3940 return Create(op: opcode, S: C, Ty, Name, InsertBefore);
3941}
3942
3943CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3944 bool isSigned, const Twine &Name,
3945 Instruction *InsertBefore) {
3946 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3947 "Invalid integer cast");
3948 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3949 unsigned DstBits = Ty->getScalarSizeInBits();
3950 Instruction::CastOps opcode =
3951 (SrcBits == DstBits ? Instruction::BitCast :
3952 (SrcBits > DstBits ? Instruction::Trunc :
3953 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3954 return Create(op: opcode, S: C, Ty, Name, InsertBefore);
3955}
3956
3957CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3958 bool isSigned, const Twine &Name,
3959 BasicBlock *InsertAtEnd) {
3960 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3961 "Invalid cast");
3962 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3963 unsigned DstBits = Ty->getScalarSizeInBits();
3964 Instruction::CastOps opcode =
3965 (SrcBits == DstBits ? Instruction::BitCast :
3966 (SrcBits > DstBits ? Instruction::Trunc :
3967 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3968 return Create(op: opcode, S: C, Ty, Name, InsertAtEnd);
3969}
3970
3971CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,
3972 BasicBlock::iterator InsertBefore) {
3973 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3974 "Invalid cast");
3975 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3976 unsigned DstBits = Ty->getScalarSizeInBits();
3977 Instruction::CastOps opcode =
3978 (SrcBits == DstBits ? Instruction::BitCast :
3979 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3980 return Create(op: opcode, S: C, Ty, Name, InsertBefore);
3981}
3982
3983CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3984 const Twine &Name,
3985 Instruction *InsertBefore) {
3986 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3987 "Invalid cast");
3988 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3989 unsigned DstBits = Ty->getScalarSizeInBits();
3990 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3991 Instruction::CastOps opcode =
3992 (SrcBits == DstBits ? Instruction::BitCast :
3993 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3994 return Create(op: opcode, S: C, Ty, Name, InsertBefore);
3995}
3996
3997CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3998 const Twine &Name,
3999 BasicBlock *InsertAtEnd) {
4000 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
4001 "Invalid cast");
4002 unsigned SrcBits = C->getType()->getScalarSizeInBits();
4003 unsigned DstBits = Ty->getScalarSizeInBits();
4004 Instruction::CastOps opcode =
4005 (SrcBits == DstBits ? Instruction::BitCast :
4006 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4007 return Create(op: opcode, S: C, Ty, Name, InsertAtEnd);
4008}
4009
/// Return whether a "bitcast SrcTy to DestTy" would be a legal instruction:
/// both types first-class, identical, same-address-space pointers, or of
/// equal primitive bit width (vectors are compared element-wise when the
/// element counts match).
bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(Val: SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(Val: DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        // NOTE: SrcTy/DestTy are rebound to the element types from here on.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  // Pointer-to-pointer bitcasts are legal exactly when the address spaces
  // match (changing address space requires addrspacecast).
  if (PointerType *DestPtrTy = dyn_cast<PointerType>(Val: DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(Val: SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  // Bitcasts to or from the x86_mmx type are never allowed.
  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}
4049
4050bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
4051 const DataLayout &DL) {
4052 // ptrtoint and inttoptr are not allowed on non-integral pointers
4053 if (auto *PtrTy = dyn_cast<PointerType>(Val: SrcTy))
4054 if (auto *IntTy = dyn_cast<IntegerType>(Val: DestTy))
4055 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4056 !DL.isNonIntegralPointerType(PT: PtrTy));
4057 if (auto *PtrTy = dyn_cast<PointerType>(Val: DestTy))
4058 if (auto *IntTy = dyn_cast<IntegerType>(Val: SrcTy))
4059 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4060 !DL.isNonIntegralPointerType(PT: PtrTy));
4061
4062 return isBitCastable(SrcTy, DestTy);
4063}
4064
// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below. This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
// SrcIsSigned/DestIsSigned select the signed vs. unsigned flavor when an
// int<->fp or widening conversion is needed; they are ignored otherwise.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(Val: SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(Val: DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        // NOTE: SrcTy/DestTy refer to the element types from here on.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // its an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                               // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else  {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                               // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
4165
4166//===----------------------------------------------------------------------===//
4167// CastInst SubClass Constructors
4168//===----------------------------------------------------------------------===//
4169
/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  // Casts operate only on first-class, non-aggregate types.
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(Val: SrcTy);
  bool DstIsVec = isa<VectorType>(Val: DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(Val: SrcTy)->getElementCount()
                                : ElementCount::getFixed(MinVal: 0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(Val: DstTy)->getElementCount()
                                : ElementCount::getFixed(MinVal: 0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(Val: SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(Val: DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    // (Changing address spaces requires an addrspacecast, not a bitcast.)
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    // Scalar <-> single-element vector of pointers is also permitted.
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(MinVal: 1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(MinVal: 1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(Val: SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(Val: DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    // An addrspacecast that does not change the address space would be a
    // no-op bitcast, so it is rejected here.
    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}
4273
4274TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
4275 BasicBlock::iterator InsertBefore)
4276 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4277 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4278}
4279
4280TruncInst::TruncInst(
4281 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4282) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4283 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4284}
4285
4286TruncInst::TruncInst(
4287 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4288) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
4289 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4290}
4291
4292ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
4293 BasicBlock::iterator InsertBefore)
4294 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4295 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4296}
4297
4298ZExtInst::ZExtInst(
4299 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4300) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4301 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4302}
4303
4304ZExtInst::ZExtInst(
4305 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4306) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
4307 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4308}
4309
4310SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
4311 BasicBlock::iterator InsertBefore)
4312 : CastInst(Ty, SExt, S, Name, InsertBefore) {
4313 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4314}
4315
4316SExtInst::SExtInst(
4317 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4318) : CastInst(Ty, SExt, S, Name, InsertBefore) {
4319 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4320}
4321
4322SExtInst::SExtInst(
4323 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4324) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
4325 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4326}
4327
4328FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
4329 BasicBlock::iterator InsertBefore)
4330 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4331 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4332}
4333
4334FPTruncInst::FPTruncInst(
4335 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4336) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4337 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4338}
4339
4340FPTruncInst::FPTruncInst(
4341 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4342) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
4343 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4344}
4345
4346FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
4347 BasicBlock::iterator InsertBefore)
4348 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4349 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4350}
4351
4352FPExtInst::FPExtInst(
4353 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4354) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4355 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4356}
4357
4358FPExtInst::FPExtInst(
4359 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4360) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
4361 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4362}
4363
4364UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
4365 BasicBlock::iterator InsertBefore)
4366 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4367 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4368}
4369
4370UIToFPInst::UIToFPInst(
4371 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4372) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4373 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4374}
4375
4376UIToFPInst::UIToFPInst(
4377 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4378) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
4379 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4380}
4381
4382SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
4383 BasicBlock::iterator InsertBefore)
4384 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4385 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4386}
4387
4388SIToFPInst::SIToFPInst(
4389 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4390) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4391 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4392}
4393
4394SIToFPInst::SIToFPInst(
4395 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4396) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
4397 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4398}
4399
4400FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
4401 BasicBlock::iterator InsertBefore)
4402 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4403 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4404}
4405
4406FPToUIInst::FPToUIInst(
4407 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4408) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4409 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4410}
4411
4412FPToUIInst::FPToUIInst(
4413 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4414) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
4415 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4416}
4417
4418FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
4419 BasicBlock::iterator InsertBefore)
4420 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4421 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4422}
4423
4424FPToSIInst::FPToSIInst(
4425 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4426) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4427 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4428}
4429
4430FPToSIInst::FPToSIInst(
4431 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4432) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
4433 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4434}
4435
4436PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
4437 BasicBlock::iterator InsertBefore)
4438 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4439 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4440}
4441
4442PtrToIntInst::PtrToIntInst(
4443 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4444) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4445 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4446}
4447
4448PtrToIntInst::PtrToIntInst(
4449 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4450) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
4451 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4452}
4453
4454IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
4455 BasicBlock::iterator InsertBefore)
4456 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4457 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4458}
4459
4460IntToPtrInst::IntToPtrInst(
4461 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4462) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4463 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4464}
4465
4466IntToPtrInst::IntToPtrInst(
4467 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4468) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
4469 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4470}
4471
4472BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
4473 BasicBlock::iterator InsertBefore)
4474 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4475 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4476}
4477
4478BitCastInst::BitCastInst(
4479 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4480) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4481 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4482}
4483
4484BitCastInst::BitCastInst(
4485 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4486) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
4487 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4488}
4489
4490AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
4491 BasicBlock::iterator InsertBefore)
4492 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4493 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4494}
4495
4496AddrSpaceCastInst::AddrSpaceCastInst(
4497 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4498) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4499 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4500}
4501
4502AddrSpaceCastInst::AddrSpaceCastInst(
4503 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4504) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
4505 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4506}
4507
4508//===----------------------------------------------------------------------===//
4509// CmpInst Classes
4510//===----------------------------------------------------------------------===//
4511
/// Construct a two-operand compare instruction inserted at the position given
/// by \p InsertBefore. If \p FlagsSource is non-null, its IR flags are copied
/// onto the new instruction.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name,
                 BasicBlock::iterator InsertBefore, Instruction *FlagsSource)
    : Instruction(ty, op, OperandTraits<CmpInst>::op_begin(U: this),
                  OperandTraits<CmpInst>::operands(this), InsertBefore) {
  // Operands must be installed before the predicate/name are set.
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(V: FlagsSource);
}
4524
/// Construct a two-operand compare instruction inserted before
/// \p InsertBefore (which may be null, leaving the instruction unlinked).
/// If \p FlagsSource is non-null, its IR flags are copied onto the new
/// instruction.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore,
                 Instruction *FlagsSource)
    : Instruction(ty, op,
                  OperandTraits<CmpInst>::op_begin(U: this),
                  OperandTraits<CmpInst>::operands(this),
                  InsertBefore) {
  // Operands must be installed before the predicate/name are set.
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(V: FlagsSource);
}
4539
/// Construct a two-operand compare instruction appended to the end of
/// \p InsertAtEnd. This overload has no flags source; IR flags are left at
/// their defaults.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(ty, op,
                  OperandTraits<CmpInst>::op_begin(U: this),
                  OperandTraits<CmpInst>::operands(this),
                  InsertAtEnd) {
  // Operands must be installed before the predicate/name are set.
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}
4551
4552CmpInst *
4553CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4554 const Twine &Name, BasicBlock::iterator InsertBefore) {
4555 if (Op == Instruction::ICmp) {
4556 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4557 S1, S2, Name);
4558 }
4559
4560 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4561 S1, S2, Name);
4562}
4563
4564CmpInst *
4565CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4566 const Twine &Name, Instruction *InsertBefore) {
4567 if (Op == Instruction::ICmp) {
4568 if (InsertBefore)
4569 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4570 S1, S2, Name);
4571 else
4572 return new ICmpInst(CmpInst::Predicate(predicate),
4573 S1, S2, Name);
4574 }
4575
4576 if (InsertBefore)
4577 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4578 S1, S2, Name);
4579 else
4580 return new FCmpInst(CmpInst::Predicate(predicate),
4581 S1, S2, Name);
4582}
4583
4584CmpInst *
4585CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4586 const Twine &Name, BasicBlock *InsertAtEnd) {
4587 if (Op == Instruction::ICmp) {
4588 return new ICmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
4589 S1, S2, Name);
4590 }
4591 return new FCmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
4592 S1, S2, Name);
4593}
4594
4595CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
4596 Value *S2,
4597 const Instruction *FlagsSource,
4598 const Twine &Name,
4599 Instruction *InsertBefore) {
4600 CmpInst *Inst = Create(Op, predicate: Pred, S1, S2, Name, InsertBefore);
4601 Inst->copyIRFlags(V: FlagsSource);
4602 return Inst;
4603}
4604
4605void CmpInst::swapOperands() {
4606 if (ICmpInst *IC = dyn_cast<ICmpInst>(Val: this))
4607 IC->swapOperands();
4608 else
4609 cast<FCmpInst>(Val: this)->swapOperands();
4610}
4611
4612bool CmpInst::isCommutative() const {
4613 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Val: this))
4614 return IC->isCommutative();
4615 return cast<FCmpInst>(Val: this)->isCommutative();
4616}
4617
4618bool CmpInst::isEquality(Predicate P) {
4619 if (ICmpInst::isIntPredicate(P))
4620 return ICmpInst::isEquality(P);
4621 if (FCmpInst::isFPPredicate(P))
4622 return FCmpInst::isEquality(Pred: P);
4623 llvm_unreachable("Unsupported predicate kind");
4624}
4625
/// Return the predicate whose result is the logical negation of \p pred,
/// i.e. "A inv(pred) B" is true exactly when "A pred B" is false.
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: return ICMP_NE;
  case ICMP_NE: return ICMP_EQ;
  case ICMP_UGT: return ICMP_ULE;
  case ICMP_ULT: return ICMP_UGE;
  case ICMP_UGE: return ICMP_ULT;
  case ICMP_ULE: return ICMP_UGT;
  case ICMP_SGT: return ICMP_SLE;
  case ICMP_SLT: return ICMP_SGE;
  case ICMP_SGE: return ICMP_SLT;
  case ICMP_SLE: return ICMP_SGT;

  // FP inverses flip both the relation and the ordered/unordered bit: the
  // negation of an ordered predicate must be true on NaN input (e.g. the
  // inverse of oeq is une, not one).
  case FCMP_OEQ: return FCMP_UNE;
  case FCMP_ONE: return FCMP_UEQ;
  case FCMP_OGT: return FCMP_ULE;
  case FCMP_OLT: return FCMP_UGE;
  case FCMP_OGE: return FCMP_ULT;
  case FCMP_OLE: return FCMP_UGT;
  case FCMP_UEQ: return FCMP_ONE;
  case FCMP_UNE: return FCMP_OEQ;
  case FCMP_UGT: return FCMP_OLE;
  case FCMP_ULT: return FCMP_OGE;
  case FCMP_UGE: return FCMP_OLT;
  case FCMP_ULE: return FCMP_OGT;
  case FCMP_ORD: return FCMP_UNO;
  case FCMP_UNO: return FCMP_ORD;
  case FCMP_TRUE: return FCMP_FALSE;
  case FCMP_FALSE: return FCMP_TRUE;
  }
}
4658
/// Return the textual IR mnemonic for \p Pred (e.g. "oeq", "slt"), or
/// "unknown" for an out-of-range value. These strings match the LLVM
/// assembly syntax for icmp/fcmp.
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}
4690
4691raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
4692 OS << CmpInst::getPredicateName(Pred);
4693 return OS;
4694}
4695
/// Return the signed counterpart of \p pred: equality and already-signed
/// predicates pass through unchanged; unsigned relational predicates map to
/// their signed versions.
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
    return pred;
  case ICMP_UGT: return ICMP_SGT;
  case ICMP_ULT: return ICMP_SLT;
  case ICMP_UGE: return ICMP_SGE;
  case ICMP_ULE: return ICMP_SLE;
  }
}
4708
/// Return the unsigned counterpart of \p pred: equality and already-unsigned
/// predicates pass through unchanged; signed relational predicates map to
/// their unsigned versions.
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
    return pred;
  case ICMP_SGT: return ICMP_UGT;
  case ICMP_SLT: return ICMP_ULT;
  case ICMP_SGE: return ICMP_UGE;
  case ICMP_SLE: return ICMP_ULE;
  }
}
4721
/// Return the predicate to use when the two operands are exchanged, so that
/// "A pred B" == "B swapped(pred) A". Symmetric predicates (equality,
/// ord/uno, true/false) are unchanged; relational predicates are mirrored.
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: case ICMP_NE:
    return pred;
  case ICMP_SGT: return ICMP_SLT;
  case ICMP_SLT: return ICMP_SGT;
  case ICMP_SGE: return ICMP_SLE;
  case ICMP_SLE: return ICMP_SGE;
  case ICMP_UGT: return ICMP_ULT;
  case ICMP_ULT: return ICMP_UGT;
  case ICMP_UGE: return ICMP_ULE;
  case ICMP_ULE: return ICMP_UGE;

  case FCMP_FALSE: case FCMP_TRUE:
  case FCMP_OEQ: case FCMP_ONE:
  case FCMP_UEQ: case FCMP_UNE:
  case FCMP_ORD: case FCMP_UNO:
    return pred;
  case FCMP_OGT: return FCMP_OLT;
  case FCMP_OLT: return FCMP_OGT;
  case FCMP_OGE: return FCMP_OLE;
  case FCMP_OLE: return FCMP_OGE;
  case FCMP_UGT: return FCMP_ULT;
  case FCMP_ULT: return FCMP_UGT;
  case FCMP_UGE: return FCMP_ULE;
  case FCMP_ULE: return FCMP_UGE;
  }
}
4751
4752bool CmpInst::isNonStrictPredicate(Predicate pred) {
4753 switch (pred) {
4754 case ICMP_SGE:
4755 case ICMP_SLE:
4756 case ICMP_UGE:
4757 case ICMP_ULE:
4758 case FCMP_OGE:
4759 case FCMP_OLE:
4760 case FCMP_UGE:
4761 case FCMP_ULE:
4762 return true;
4763 default:
4764 return false;
4765 }
4766}
4767
4768bool CmpInst::isStrictPredicate(Predicate pred) {
4769 switch (pred) {
4770 case ICMP_SGT:
4771 case ICMP_SLT:
4772 case ICMP_UGT:
4773 case ICMP_ULT:
4774 case FCMP_OGT:
4775 case FCMP_OLT:
4776 case FCMP_UGT:
4777 case FCMP_ULT:
4778 return true;
4779 default:
4780 return false;
4781 }
4782}
4783
/// Map a non-strict relational predicate (>=, <=) to its strict form (>, <).
/// Any other predicate is returned unchanged.
CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE:
    return ICMP_SGT;
  case ICMP_SLE:
    return ICMP_SLT;
  case ICMP_UGE:
    return ICMP_UGT;
  case ICMP_ULE:
    return ICMP_ULT;
  case FCMP_OGE:
    return FCMP_OGT;
  case FCMP_OLE:
    return FCMP_OLT;
  case FCMP_UGE:
    return FCMP_UGT;
  case FCMP_ULE:
    return FCMP_ULT;
  default:
    return pred;
  }
}
4806
/// Map a strict relational predicate (>, <) to its non-strict form (>=, <=).
/// Any other predicate is returned unchanged.
CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT:
    return ICMP_SGE;
  case ICMP_SLT:
    return ICMP_SLE;
  case ICMP_UGT:
    return ICMP_UGE;
  case ICMP_ULT:
    return ICMP_ULE;
  case FCMP_OGT:
    return FCMP_OGE;
  case FCMP_OLT:
    return FCMP_OLE;
  case FCMP_UGT:
    return FCMP_UGE;
  case FCMP_ULT:
    return FCMP_ULE;
  default:
    return pred;
  }
}
4829
4830CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
4831 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4832
4833 if (isStrictPredicate(pred))
4834 return getNonStrictPredicate(pred);
4835 if (isNonStrictPredicate(pred))
4836 return getStrictPredicate(pred);
4837
4838 llvm_unreachable("Unknown predicate!");
4839}
4840
/// Map an unsigned relational predicate to its signed counterpart. Unlike
/// ICmpInst::getSignedPredicate, this asserts that the input really is an
/// unsigned predicate rather than passing other predicates through.
CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}
4857
/// Map a signed relational predicate to its unsigned counterpart. Unlike
/// ICmpInst::getUnsignedPredicate, this asserts that the input really is a
/// signed predicate rather than passing other predicates through.
CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_SLT:
    return CmpInst::ICMP_ULT;
  case CmpInst::ICMP_SLE:
    return CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT:
    return CmpInst::ICMP_UGT;
  case CmpInst::ICMP_SGE:
    return CmpInst::ICMP_UGE;
  }
}
4874
4875bool CmpInst::isUnsigned(Predicate predicate) {
4876 switch (predicate) {
4877 default: return false;
4878 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
4879 case ICmpInst::ICMP_UGE: return true;
4880 }
4881}
4882
4883bool CmpInst::isSigned(Predicate predicate) {
4884 switch (predicate) {
4885 default: return false;
4886 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
4887 case ICmpInst::ICMP_SGE: return true;
4888 }
4889}
4890
4891bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4892 ICmpInst::Predicate Pred) {
4893 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4894 switch (Pred) {
4895 case ICmpInst::Predicate::ICMP_EQ:
4896 return LHS.eq(RHS);
4897 case ICmpInst::Predicate::ICMP_NE:
4898 return LHS.ne(RHS);
4899 case ICmpInst::Predicate::ICMP_UGT:
4900 return LHS.ugt(RHS);
4901 case ICmpInst::Predicate::ICMP_UGE:
4902 return LHS.uge(RHS);
4903 case ICmpInst::Predicate::ICMP_ULT:
4904 return LHS.ult(RHS);
4905 case ICmpInst::Predicate::ICMP_ULE:
4906 return LHS.ule(RHS);
4907 case ICmpInst::Predicate::ICMP_SGT:
4908 return LHS.sgt(RHS);
4909 case ICmpInst::Predicate::ICMP_SGE:
4910 return LHS.sge(RHS);
4911 case ICmpInst::Predicate::ICMP_SLT:
4912 return LHS.slt(RHS);
4913 case ICmpInst::Predicate::ICMP_SLE:
4914 return LHS.sle(RHS);
4915 default:
4916 llvm_unreachable("Unexpected non-integer predicate.");
4917 };
4918}
4919
/// Evaluate a floating-point predicate on two constant operands.
/// APFloat::compare returns cmpUnordered when either operand is NaN; the
/// U* predicates are true on unordered results, the O* predicates false.
/// \returns the boolean result of "LHS Pred RHS"
bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    // "unordered or not equal" — everything except cmpEqual.
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    // "ordered and not equal" — strictly less or strictly greater.
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
4960
4961CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
4962 assert(CmpInst::isRelational(pred) &&
4963 "Call only with non-equality predicates!");
4964
4965 if (isSigned(predicate: pred))
4966 return getUnsignedPredicate(pred);
4967 if (isUnsigned(predicate: pred))
4968 return getSignedPredicate(pred);
4969
4970 llvm_unreachable("Unknown predicate!");
4971}
4972
4973bool CmpInst::isOrdered(Predicate predicate) {
4974 switch (predicate) {
4975 default: return false;
4976 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
4977 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
4978 case FCmpInst::FCMP_ORD: return true;
4979 }
4980}
4981
4982bool CmpInst::isUnordered(Predicate predicate) {
4983 switch (predicate) {
4984 default: return false;
4985 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
4986 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
4987 case FCmpInst::FCMP_UNO: return true;
4988 }
4989}
4990
4991bool CmpInst::isTrueWhenEqual(Predicate predicate) {
4992 switch(predicate) {
4993 default: return false;
4994 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
4995 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
4996 }
4997}
4998
4999bool CmpInst::isFalseWhenEqual(Predicate predicate) {
5000 switch(predicate) {
5001 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
5002 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
5003 default: return false;
5004 }
5005}
5006
/// Return true if "A Pred1 B" being true implies "A Pred2 B" is true for the
/// same operands. A conservative false means "unknown", not "implies false".
bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (Pred1 == Pred2)
    return true;

  switch (Pred1) {
  default:
    break;
  case ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  // No implication known.
  return false;
}
5031
/// Return true if "A Pred1 B" being true implies "A Pred2 B" is false,
/// i.e. Pred1 implies the inverse of Pred2 is true.
bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, Pred2: getInversePredicate(pred: Pred2));
}
5035
5036//===----------------------------------------------------------------------===//
5037// SwitchInst Implementation
5038//===----------------------------------------------------------------------===//
5039
/// Shared initialization for SwitchInst: reserve operand space and install
/// the condition value (operand 0) and default destination (operand 1).
/// \p NumReserved is the total operand slots to allocate (2 + 2 per case).
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  // Operands are allocated out-of-line ("hung off") so that cases can be
  // added after construction without reallocating the instruction itself.
  setNumHungOffUseOperands(2);
  allocHungoffUses(N: ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}
5049
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  // Reserve 2 slots for (condition, default) plus 2 per case (value, dest).
  init(Value, Default, NumReserved: 2 + NumCases * 2);
}
5060
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  // Reserve two slots (condition + default dest) plus two per expected case.
  init(Value, Default, NumReserved: 2+NumCases*2);
}
5071
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  // Reserve two slots (condition + default dest) plus two per expected case.
  init(Value, Default, NumReserved: 2+NumCases*2);
}
5082
5083SwitchInst::SwitchInst(const SwitchInst &SI)
5084 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
5085 init(Value: SI.getCondition(), Default: SI.getDefaultDest(), NumReserved: SI.getNumOperands());
5086 setNumHungOffUseOperands(SI.getNumOperands());
5087 Use *OL = getOperandList();
5088 const Use *InOL = SI.getOperandList();
5089 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
5090 OL[i] = InOL[i];
5091 OL[i+1] = InOL[i+1];
5092 }
5093 SubclassOptionalData = SI.SubclassOptionalData;
5094}
5095
5096/// addCase - Add an entry to the switch instruction...
5097///
5098void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
5099 unsigned NewCaseIdx = getNumCases();
5100 unsigned OpNo = getNumOperands();
5101 if (OpNo+2 > ReservedSpace)
5102 growOperands(); // Get more space!
5103 // Initialize some new operands.
5104 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
5105 setNumHungOffUseOperands(OpNo+2);
5106 CaseHandle Case(this, NewCaseIdx);
5107 Case.setValue(OnVal);
5108 Case.setSuccessor(Dest);
5109}
5110
5111/// removeCase - This method removes the specified case and its successor
5112/// from the switch instruction.
5113SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
5114 unsigned idx = I->getCaseIndex();
5115
5116 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
5117
5118 unsigned NumOps = getNumOperands();
5119 Use *OL = getOperandList();
5120
5121 // Overwrite this case with the end of the list.
5122 if (2 + (idx + 1) * 2 != NumOps) {
5123 OL[2 + idx * 2] = OL[NumOps - 2];
5124 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
5125 }
5126
5127 // Nuke the last value.
5128 OL[NumOps-2].set(nullptr);
5129 OL[NumOps-2+1].set(nullptr);
5130 setNumHungOffUseOperands(NumOps-2);
5131
5132 return CaseIt(this, idx);
5133}
5134
5135/// growOperands - grow operands - This grows the operand list in response
5136/// to a push_back style of operation. This grows the number of ops by 3 times.
5137///
5138void SwitchInst::growOperands() {
5139 unsigned e = getNumOperands();
5140 unsigned NumOps = e*3;
5141
5142 ReservedSpace = NumOps;
5143 growHungoffUses(N: ReservedSpace);
5144}
5145
5146MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
5147 assert(Changed && "called only if metadata has changed");
5148
5149 if (!Weights)
5150 return nullptr;
5151
5152 assert(SI.getNumSuccessors() == Weights->size() &&
5153 "num of prof branch_weights must accord with num of successors");
5154
5155 bool AllZeroes = all_of(Range&: *Weights, P: [](uint32_t W) { return W == 0; });
5156
5157 if (AllZeroes || Weights->size() < 2)
5158 return nullptr;
5159
5160 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(Weights: *Weights);
5161}
5162
5163void SwitchInstProfUpdateWrapper::init() {
5164 MDNode *ProfileData = getBranchWeightMDNode(I: SI);
5165 if (!ProfileData)
5166 return;
5167
5168 if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
5169 llvm_unreachable("number of prof branch_weights metadata operands does "
5170 "not correspond to number of succesors");
5171 }
5172
5173 SmallVector<uint32_t, 8> Weights;
5174 if (!extractBranchWeights(ProfileData, Weights))
5175 return;
5176 this->Weights = std::move(Weights);
5177}
5178
/// Remove case I from the wrapped switch, keeping the cached branch weights
/// consistent with the operand shuffle SwitchInst::removeCase performs.
SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    // The +1 offset is because weight slot 0 belongs to the default
    // successor; case i's weight lives at slot i + 1.
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}
5193
5194void SwitchInstProfUpdateWrapper::addCase(
5195 ConstantInt *OnVal, BasicBlock *Dest,
5196 SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
5197 SI.addCase(OnVal, Dest);
5198
5199 if (!Weights && W && *W) {
5200 Changed = true;
5201 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5202 (*Weights)[SI.getNumSuccessors() - 1] = *W;
5203 } else if (Weights) {
5204 Changed = true;
5205 Weights->push_back(Elt: W.value_or(u: 0));
5206 }
5207 if (Weights)
5208 assert(SI.getNumSuccessors() == Weights->size() &&
5209 "num of prof branch_weights must accord with num of successors");
5210}
5211
5212Instruction::InstListType::iterator
5213SwitchInstProfUpdateWrapper::eraseFromParent() {
5214 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
5215 Changed = false;
5216 if (Weights)
5217 Weights->resize(N: 0);
5218 return SI.eraseFromParent();
5219}
5220
5221SwitchInstProfUpdateWrapper::CaseWeightOpt
5222SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
5223 if (!Weights)
5224 return std::nullopt;
5225 return (*Weights)[idx];
5226}
5227
5228void SwitchInstProfUpdateWrapper::setSuccessorWeight(
5229 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
5230 if (!W)
5231 return;
5232
5233 if (!Weights && *W)
5234 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5235
5236 if (Weights) {
5237 auto &OldW = (*Weights)[idx];
5238 if (*W != OldW) {
5239 Changed = true;
5240 OldW = *W;
5241 }
5242 }
5243}
5244
5245SwitchInstProfUpdateWrapper::CaseWeightOpt
5246SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
5247 unsigned idx) {
5248 if (MDNode *ProfileData = getBranchWeightMDNode(I: SI))
5249 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
5250 return mdconst::extract<ConstantInt>(MD: ProfileData->getOperand(I: idx + 1))
5251 ->getValue()
5252 .getZExtValue();
5253
5254 return std::nullopt;
5255}
5256
5257//===----------------------------------------------------------------------===//
5258// IndirectBrInst Implementation
5259//===----------------------------------------------------------------------===//
5260
5261void IndirectBrInst::init(Value *Address, unsigned NumDests) {
5262 assert(Address && Address->getType()->isPointerTy() &&
5263 "Address of indirectbr must be a pointer");
5264 ReservedSpace = 1+NumDests;
5265 setNumHungOffUseOperands(1);
5266 allocHungoffUses(N: ReservedSpace);
5267
5268 Op<0>() = Address;
5269}
5270
5271
5272/// growOperands - grow operands - This grows the operand list in response
5273/// to a push_back style of operation. This grows the number of ops by 2 times.
5274///
5275void IndirectBrInst::growOperands() {
5276 unsigned e = getNumOperands();
5277 unsigned NumOps = e*2;
5278
5279 ReservedSpace = NumOps;
5280 growHungoffUses(N: ReservedSpace);
5281}
5282
/// Construct an indirectbr on Address with room for NumCases destinations,
/// inserted before the given iterator position.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C&: Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumDests: NumCases);
}

/// Construct an indirectbr on Address with room for NumCases destinations,
/// inserted before the given instruction.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C&: Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumDests: NumCases);
}

/// Construct an indirectbr on Address with room for NumCases destinations,
/// appended to the end of the given basic block.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C&: Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumDests: NumCases);
}
5303
5304IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
5305 : Instruction(Type::getVoidTy(C&: IBI.getContext()), Instruction::IndirectBr,
5306 nullptr, IBI.getNumOperands()) {
5307 allocHungoffUses(N: IBI.getNumOperands());
5308 Use *OL = getOperandList();
5309 const Use *InOL = IBI.getOperandList();
5310 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
5311 OL[i] = InOL[i];
5312 SubclassOptionalData = IBI.SubclassOptionalData;
5313}
5314
5315/// addDestination - Add a destination.
5316///
5317void IndirectBrInst::addDestination(BasicBlock *DestBB) {
5318 unsigned OpNo = getNumOperands();
5319 if (OpNo+1 > ReservedSpace)
5320 growOperands(); // Get more space!
5321 // Initialize some new operands.
5322 assert(OpNo < ReservedSpace && "Growing didn't work!");
5323 setNumHungOffUseOperands(OpNo+1);
5324 getOperandList()[OpNo] = DestBB;
5325}
5326
5327/// removeDestination - This method removes the specified successor from the
5328/// indirectbr instruction.
5329void IndirectBrInst::removeDestination(unsigned idx) {
5330 assert(idx < getNumOperands()-1 && "Successor index out of range!");
5331
5332 unsigned NumOps = getNumOperands();
5333 Use *OL = getOperandList();
5334
5335 // Replace this value with the last one.
5336 OL[idx+1] = OL[NumOps-1];
5337
5338 // Nuke the last value.
5339 OL[NumOps-1].set(nullptr);
5340 setNumHungOffUseOperands(NumOps-1);
5341}
5342
5343//===----------------------------------------------------------------------===//
5344// FreezeInst Implementation
5345//===----------------------------------------------------------------------===//
5346
/// Construct a freeze of S named Name, inserted before the given iterator
/// position. The result type matches the operand's type.
FreezeInst::FreezeInst(Value *S, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

/// Construct a freeze of S named Name, inserted before the given instruction.
FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, Instruction *InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

/// Construct a freeze of S named Name, appended to the given basic block.
FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
  setName(Name);
}
5364
5365//===----------------------------------------------------------------------===//
5366// cloneImpl() implementations
5367//===----------------------------------------------------------------------===//
5368
5369// Define these methods here so vtables don't get emitted into every translation
5370// unit that uses these classes.
5371
// The operand count is passed to the class-specific operator new so the
// clone's co-allocated use list matches this instruction's.
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  // Rebuild from opcode and operand; Create() copies no flags itself.
  return Create(Op: getOpcode(), S: Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  // Rebuild from opcode and both operands.
  return Create(Op: getOpcode(), S1: Op<0>(), S2: Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  // Rebuild from the predicate and both operands.
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  // Rebuild from the predicate and both operands.
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  // Copy constructor carries over operand and index list.
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  // Copy constructor carries over operands and index list.
  return new InsertValueInst(*this);
}
5399
5400AllocaInst *AllocaInst::cloneImpl() const {
5401 AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
5402 getOperand(i_nocapture: 0), getAlign());
5403 Result->setUsedWithInAlloca(isUsedWithInAlloca());
5404 Result->setSwiftError(isSwiftError());
5405 return Result;
5406}
5407
5408LoadInst *LoadInst::cloneImpl() const {
5409 return new LoadInst(getType(), getOperand(i_nocapture: 0), Twine(), isVolatile(),
5410 getAlign(), getOrdering(), getSyncScopeID());
5411}
5412
5413StoreInst *StoreInst::cloneImpl() const {
5414 return new StoreInst(getOperand(i_nocapture: 0), getOperand(i_nocapture: 1), isVolatile(), getAlign(),
5415 getOrdering(), getSyncScopeID());
5416}
5417
5418AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
5419 AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
5420 getOperand(i_nocapture: 0), getOperand(i_nocapture: 1), getOperand(i_nocapture: 2), getAlign(),
5421 getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
5422 Result->setVolatile(isVolatile());
5423 Result->setWeak(isWeak());
5424 return Result;
5425}
5426
5427AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
5428 AtomicRMWInst *Result =
5429 new AtomicRMWInst(getOperation(), getOperand(i_nocapture: 0), getOperand(i_nocapture: 1),
5430 getAlign(), getOrdering(), getSyncScopeID());
5431 Result->setVolatile(isVolatile());
5432 return Result;
5433}
5434
FenceInst *FenceInst::cloneImpl() const {
  // A fence has no operands; ordering and scope fully describe it.
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

// Each cast cloneImpl rebuilds the instruction from its single source
// operand and the destination type.
TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(i_nocapture: 0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(i_nocapture: 0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(i_nocapture: 0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(i_nocapture: 0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(i_nocapture: 0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(i_nocapture: 0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(i_nocapture: 0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(i_nocapture: 0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(i_nocapture: 0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(i_nocapture: 0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(i_nocapture: 0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(i_nocapture: 0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(i_nocapture: 0), getType());
}
5490
5491CallInst *CallInst::cloneImpl() const {
5492 if (hasOperandBundles()) {
5493 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5494 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
5495 }
5496 return new(getNumOperands()) CallInst(*this);
5497}
5498
SelectInst *SelectInst::cloneImpl() const {
  // Rebuild from condition and both value operands.
  return SelectInst::Create(C: getOperand(i_nocapture: 0), S1: getOperand(i_nocapture: 1), S2: getOperand(i_nocapture: 2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  // Rebuild from the va_list operand and the result type.
  return new VAArgInst(getOperand(i_nocapture: 0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  // Rebuild from vector and index operands.
  return ExtractElementInst::Create(Vec: getOperand(i_nocapture: 0), Idx: getOperand(i_nocapture: 1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  // Rebuild from vector, element and index operands.
  return InsertElementInst::Create(Vec: getOperand(i_nocapture: 0), NewElt: getOperand(i_nocapture: 1), Idx: getOperand(i_nocapture: 2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  // Rebuild from both vector operands and the shuffle mask.
  return new ShuffleVectorInst(getOperand(i_nocapture: 0), getOperand(i_nocapture: 1), getShuffleMask());
}

// Copy constructors carry over all operands for these instructions; the
// operand count sizes the co-allocated use list where operator new takes it.
PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}
5538
5539InvokeInst *InvokeInst::cloneImpl() const {
5540 if (hasOperandBundles()) {
5541 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5542 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
5543 }
5544 return new(getNumOperands()) InvokeInst(*this);
5545}
5546
5547CallBrInst *CallBrInst::cloneImpl() const {
5548 if (hasOperandBundles()) {
5549 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5550 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
5551 }
5552 return new (getNumOperands()) CallBrInst(*this);
5553}
5554
// Copy constructors carry over all operands; the placement argument sizes
// the co-allocated use list.
ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  // Unreachable has no operands; only the context is needed.
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  // Rebuild from the single frozen operand.
  return new FreezeInst(getOperand(i_nocapture: 0));
}
5581

// source code of llvm/lib/IR/Instructions.cpp