//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
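
// Illustrative usage sketch (not part of the upstream file): creation and
// insertion can be split when the final insertion point is only known later.
// Assuming a hypothetical MachineIRBuilder `B`:
//   MachineInstrBuilder MIB = B.buildInstrNoInsert(TargetOpcode::G_ADD);
//   // ... add defs/uses to MIB ...
//   B.insertInstr(MIB); // inserts at B's current insertion point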

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
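
// Illustrative usage sketch (not part of the upstream file), assuming a
// hypothetical pointer vreg `Base` and builder `B`:
//   Register Sum; // must be invalid on entry; filled in by the call
//   if (B.materializePtrAdd(Sum, Base, LLT::scalar(64), 16))
//     ; // a G_CONSTANT and a G_PTR_ADD were emitted; Sum is the new pointer
//   else
//     ; // the offset was 0, so Sum aliases Base and nothing was emitted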

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
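
// Illustrative sketch of the effect above (not part of the upstream file):
// padding a hypothetical <2 x s32> value %v out to <4 x s32> conceptually
// expands to
//   %e0:_(s32), %e1:_(s32) = G_UNMERGE_VALUES %v(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %r:_(<4 x s32>) = G_BUILD_VECTOR %e0(s32), %e1(s32), %u(s32), %u(s32)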

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
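
// Illustrative sketch of the fixed-vector path above (not part of the
// upstream file): requesting a <2 x s32> constant with value 7 conceptually
// produces a scalar G_CONSTANT that buildSplatVector then repeats:
//   %c:_(s32) = G_CONSTANT i32 7
//   %v:_(<2 x s32>) = G_BUILD_VECTOR %c(s32), %c(s32)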

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
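
// Illustrative sketch (not part of the upstream file): for a target reporting
// ZeroOrNegativeOneBooleanContent, extending a hypothetical s1 condition with
// buildBoolExt emits G_SEXT, so a true bit becomes -1 in the wider type; a
// ZeroOrOneBooleanContent target gets G_ZEXT and the value 1 instead.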

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
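
// Illustrative sketch of the opcode choice above (not part of the upstream
// file):
//   scalar destination, scalar sources -> G_MERGE_VALUES
//   vector destination, scalar sources -> G_BUILD_VECTOR
//   vector destination, vector sources -> G_CONCAT_VECTORS
// e.g. merging two s32 values into an s64 uses G_MERGE_VALUES, while merging
// two <2 x s32> values into a <4 x s32> uses G_CONCAT_VECTORS.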

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
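
// Illustrative sketch of the sequence built above (not part of the upstream
// file), splatting a hypothetical s32 value %s into <4 x s32>:
//   %undef:_(<4 x s32>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %s(s32), %zero(s64)
//   %res:_(<4 x s32>) = G_SHUFFLE_VECTOR %ins, %undef, shufflemask(0, 0, 0, 0)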

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
             CmpInst::Predicate Pred = SrcOps[0].getPredicate();
             return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                                : CmpInst::isFPPredicate(Pred);
           }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
             LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
             LLT DstTy = DstOps[0].getLLTTy(*getMRI());
             if (Op0Ty.isScalar() || Op0Ty.isPointer())
               return DstTy.isScalar();
             else
               return DstTy.isVector() &&
                      DstTy.getNumElements() == Op0Ty.getNumElements();
           }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
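
// Illustrative usage sketch of the generic buildInstr overload above (not
// part of the upstream file), assuming a hypothetical builder `B` and two
// s32 vregs `A` and `C`:
//   auto Sum = B.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {A, C});
// In asserts-enabled builds this routes through validateBinaryOp, which
// checks that the destination and both sources share the same LLT.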
