1//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
11///
12//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
15#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
16#include "llvm/CodeGen/MachineOperand.h"
17#include "llvm/CodeGen/MachineRegisterInfo.h"
18#include "llvm/CodeGen/TargetLowering.h"
19#include "llvm/IR/Module.h"
20
21#define DEBUG_TYPE "inline-asm-lowering"
22
23using namespace llvm;
24
// Out-of-line virtual-method anchor: pins InlineAsmLowering's vtable to this
// translation unit so it is emitted exactly once.
void InlineAsmLowering::anchor() {}
26
27namespace {
28
/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

// One entry per parsed constraint of the asm statement; 16 inline elements
// cover typical inline-asm operand counts without heap allocation.
using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;
42
43class ExtraFlags {
44 unsigned Flags = 0;
45
46public:
47 explicit ExtraFlags(const CallBase &CB) {
48 const InlineAsm *IA = cast<InlineAsm>(Val: CB.getCalledOperand());
49 if (IA->hasSideEffects())
50 Flags |= InlineAsm::Extra_HasSideEffects;
51 if (IA->isAlignStack())
52 Flags |= InlineAsm::Extra_IsAlignStack;
53 if (CB.isConvergent())
54 Flags |= InlineAsm::Extra_IsConvergent;
55 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
56 }
57
58 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
59 // Ideally, we would only check against memory constraints. However, the
60 // meaning of an Other constraint can be target-specific and we can't easily
61 // reason about it. Therefore, be conservative and set MayLoad/MayStore
62 // for Other constraints as well.
63 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
64 OpInfo.ConstraintType == TargetLowering::C_Other) {
65 if (OpInfo.Type == InlineAsm::isInput)
66 Flags |= InlineAsm::Extra_MayLoad;
67 else if (OpInfo.Type == InlineAsm::isOutput)
68 Flags |= InlineAsm::Extra_MayStore;
69 else if (OpInfo.Type == InlineAsm::isClobber)
70 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
71 }
72 }
73
74 unsigned get() const { return Flags; }
75};
76
77} // namespace
78
79/// Assign virtual/physical registers for the specified register operand.
80static void getRegistersForValue(MachineFunction &MF,
81 MachineIRBuilder &MIRBuilder,
82 GISelAsmOperandInfo &OpInfo,
83 GISelAsmOperandInfo &RefOpInfo) {
84
85 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
86 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
87
88 // No work to do for memory operations.
89 if (OpInfo.ConstraintType == TargetLowering::C_Memory)
90 return;
91
92 // If this is a constraint for a single physreg, or a constraint for a
93 // register class, find it.
94 Register AssignedReg;
95 const TargetRegisterClass *RC;
96 std::tie(args&: AssignedReg, args&: RC) = TLI.getRegForInlineAsmConstraint(
97 TRI: &TRI, Constraint: RefOpInfo.ConstraintCode, VT: RefOpInfo.ConstraintVT);
98 // RC is unset only on failure. Return immediately.
99 if (!RC)
100 return;
101
102 // No need to allocate a matching input constraint since the constraint it's
103 // matching to has already been allocated.
104 if (OpInfo.isMatchingInputConstraint())
105 return;
106
107 // Initialize NumRegs.
108 unsigned NumRegs = 1;
109 if (OpInfo.ConstraintVT != MVT::Other)
110 NumRegs =
111 TLI.getNumRegisters(Context&: MF.getFunction().getContext(), VT: OpInfo.ConstraintVT);
112
113 // If this is a constraint for a specific physical register, but the type of
114 // the operand requires more than one register to be passed, we allocate the
115 // required amount of physical registers, starting from the selected physical
116 // register.
117 // For this, first retrieve a register iterator for the given register class
118 TargetRegisterClass::iterator I = RC->begin();
119 MachineRegisterInfo &RegInfo = MF.getRegInfo();
120
121 // Advance the iterator to the assigned register (if set)
122 if (AssignedReg) {
123 for (; *I != AssignedReg; ++I)
124 assert(I != RC->end() && "AssignedReg should be a member of provided RC");
125 }
126
127 // Finally, assign the registers. If the AssignedReg isn't set, create virtual
128 // registers with the provided register class
129 for (; NumRegs; --NumRegs, ++I) {
130 assert(I != RC->end() && "Ran out of registers to allocate!");
131 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RegClass: RC);
132 OpInfo.Regs.push_back(Elt: R);
133 }
134}
135
136static void computeConstraintToUse(const TargetLowering *TLI,
137 TargetLowering::AsmOperandInfo &OpInfo) {
138 assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
139
140 // Single-letter constraints ('r') are very common.
141 if (OpInfo.Codes.size() == 1) {
142 OpInfo.ConstraintCode = OpInfo.Codes[0];
143 OpInfo.ConstraintType = TLI->getConstraintType(Constraint: OpInfo.ConstraintCode);
144 } else {
145 TargetLowering::ConstraintGroup G = TLI->getConstraintPreferences(OpInfo);
146 if (G.empty())
147 return;
148 // FIXME: prefer immediate constraints if the target allows it
149 unsigned BestIdx = 0;
150 for (const unsigned E = G.size();
151 BestIdx < E && (G[BestIdx].second == TargetLowering::C_Other ||
152 G[BestIdx].second == TargetLowering::C_Immediate);
153 ++BestIdx)
154 ;
155 OpInfo.ConstraintCode = G[BestIdx].first;
156 OpInfo.ConstraintType = G[BestIdx].second;
157 }
158
159 // 'X' matches anything.
160 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
161 // Labels and constants are handled elsewhere ('X' is the only thing
162 // that matches labels). For Functions, the type here is the type of
163 // the result, which is not what we want to look at; leave them alone.
164 Value *Val = OpInfo.CallOperandVal;
165 if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
166 return;
167
168 // Otherwise, try to resolve it to something we know about by looking at
169 // the actual operand type.
170 if (const char *Repl = TLI->LowerXConstraint(ConstraintVT: OpInfo.ConstraintVT)) {
171 OpInfo.ConstraintCode = Repl;
172 OpInfo.ConstraintType = TLI->getConstraintType(Constraint: OpInfo.ConstraintCode);
173 }
174 }
175}
176
177static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
178 const InlineAsm::Flag F(I.getOperand(i: OpIdx).getImm());
179 return F.getNumOperandRegisters();
180}
181
182static bool buildAnyextOrCopy(Register Dst, Register Src,
183 MachineIRBuilder &MIRBuilder) {
184 const TargetRegisterInfo *TRI =
185 MIRBuilder.getMF().getSubtarget().getRegisterInfo();
186 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
187
188 auto SrcTy = MRI->getType(Reg: Src);
189 if (!SrcTy.isValid()) {
190 LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
191 return false;
192 }
193 unsigned SrcSize = TRI->getRegSizeInBits(Reg: Src, MRI: *MRI);
194 unsigned DstSize = TRI->getRegSizeInBits(Reg: Dst, MRI: *MRI);
195
196 if (DstSize < SrcSize) {
197 LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
198 return false;
199 }
200
201 // Attempt to anyext small scalar sources.
202 if (DstSize > SrcSize) {
203 if (!SrcTy.isScalar()) {
204 LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of"
205 "destination register class\n");
206 return false;
207 }
208 Src = MIRBuilder.buildAnyExt(Res: LLT::scalar(SizeInBits: DstSize), Op: Src).getReg(Idx: 0);
209 }
210
211 MIRBuilder.buildCopy(Res: Dst, Op: Src);
212 return true;
213}
214
215bool InlineAsmLowering::lowerInlineAsm(
216 MachineIRBuilder &MIRBuilder, const CallBase &Call,
217 std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
218 const {
219 const InlineAsm *IA = cast<InlineAsm>(Val: Call.getCalledOperand());
220
221 /// ConstraintOperands - Information about all of the constraints.
222 GISelAsmOperandInfoVector ConstraintOperands;
223
224 MachineFunction &MF = MIRBuilder.getMF();
225 const Function &F = MF.getFunction();
226 const DataLayout &DL = F.getParent()->getDataLayout();
227 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
228
229 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
230
231 TargetLowering::AsmOperandInfoVector TargetConstraints =
232 TLI->ParseConstraints(DL, TRI, Call);
233
234 ExtraFlags ExtraInfo(Call);
235 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
236 unsigned ResNo = 0; // ResNo - The result number of the next output.
237 for (auto &T : TargetConstraints) {
238 ConstraintOperands.push_back(Elt: GISelAsmOperandInfo(T));
239 GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
240
241 // Compute the value type for each operand.
242 if (OpInfo.hasArg()) {
243 OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(i: ArgNo));
244
245 if (isa<BasicBlock>(Val: OpInfo.CallOperandVal)) {
246 LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
247 return false;
248 }
249
250 Type *OpTy = OpInfo.CallOperandVal->getType();
251
252 // If this is an indirect operand, the operand is a pointer to the
253 // accessed type.
254 if (OpInfo.isIndirect) {
255 OpTy = Call.getParamElementType(ArgNo);
256 assert(OpTy && "Indirect operand must have elementtype attribute");
257 }
258
259 // FIXME: Support aggregate input operands
260 if (!OpTy->isSingleValueType()) {
261 LLVM_DEBUG(
262 dbgs() << "Aggregate input operands are not supported yet\n");
263 return false;
264 }
265
266 OpInfo.ConstraintVT =
267 TLI->getAsmOperandValueType(DL, Ty: OpTy, AllowUnknown: true).getSimpleVT();
268 ++ArgNo;
269 } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
270 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
271 if (StructType *STy = dyn_cast<StructType>(Val: Call.getType())) {
272 OpInfo.ConstraintVT =
273 TLI->getSimpleValueType(DL, Ty: STy->getElementType(N: ResNo));
274 } else {
275 assert(ResNo == 0 && "Asm only has one result!");
276 OpInfo.ConstraintVT =
277 TLI->getAsmOperandValueType(DL, Ty: Call.getType()).getSimpleVT();
278 }
279 ++ResNo;
280 } else {
281 assert(OpInfo.Type != InlineAsm::isLabel &&
282 "GlobalISel currently doesn't support callbr");
283 OpInfo.ConstraintVT = MVT::Other;
284 }
285
286 if (OpInfo.ConstraintVT == MVT::i64x8)
287 return false;
288
289 // Compute the constraint code and ConstraintType to use.
290 computeConstraintToUse(TLI, OpInfo);
291
292 // The selected constraint type might expose new sideeffects
293 ExtraInfo.update(OpInfo);
294 }
295
296 // At this point, all operand types are decided.
297 // Create the MachineInstr, but don't insert it yet since input
298 // operands still need to insert instructions before this one
299 auto Inst = MIRBuilder.buildInstrNoInsert(Opcode: TargetOpcode::INLINEASM)
300 .addExternalSymbol(FnName: IA->getAsmString().c_str())
301 .addImm(Val: ExtraInfo.get());
302
303 // Starting from this operand: flag followed by register(s) will be added as
304 // operands to Inst for each constraint. Used for matching input constraints.
305 unsigned StartIdx = Inst->getNumOperands();
306
307 // Collects the output operands for later processing
308 GISelAsmOperandInfoVector OutputOperands;
309
310 for (auto &OpInfo : ConstraintOperands) {
311 GISelAsmOperandInfo &RefOpInfo =
312 OpInfo.isMatchingInputConstraint()
313 ? ConstraintOperands[OpInfo.getMatchedOperand()]
314 : OpInfo;
315
316 // Assign registers for register operands
317 getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);
318
319 switch (OpInfo.Type) {
320 case InlineAsm::isOutput:
321 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
322 const InlineAsm::ConstraintCode ConstraintID =
323 TLI->getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode);
324 assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
325 "Failed to convert memory constraint code to constraint id.");
326
327 // Add information to the INLINEASM instruction to know about this
328 // output.
329 InlineAsm::Flag Flag(InlineAsm::Kind::Mem, 1);
330 Flag.setMemConstraint(ConstraintID);
331 Inst.addImm(Val: Flag);
332 ArrayRef<Register> SourceRegs =
333 GetOrCreateVRegs(*OpInfo.CallOperandVal);
334 assert(
335 SourceRegs.size() == 1 &&
336 "Expected the memory output to fit into a single virtual register");
337 Inst.addReg(RegNo: SourceRegs[0]);
338 } else {
339 // Otherwise, this outputs to a register (directly for C_Register /
340 // C_RegisterClass/C_Other.
341 assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
342 OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
343 OpInfo.ConstraintType == TargetLowering::C_Other);
344
345 // Find a register that we can use.
346 if (OpInfo.Regs.empty()) {
347 LLVM_DEBUG(dbgs()
348 << "Couldn't allocate output register for constraint\n");
349 return false;
350 }
351
352 // Add information to the INLINEASM instruction to know that this
353 // register is set.
354 InlineAsm::Flag Flag(OpInfo.isEarlyClobber
355 ? InlineAsm::Kind::RegDefEarlyClobber
356 : InlineAsm::Kind::RegDef,
357 OpInfo.Regs.size());
358 if (OpInfo.Regs.front().isVirtual()) {
359 // Put the register class of the virtual registers in the flag word.
360 // That way, later passes can recompute register class constraints for
361 // inline assembly as well as normal instructions. Don't do this for
362 // tied operands that can use the regclass information from the def.
363 const TargetRegisterClass *RC = MRI->getRegClass(Reg: OpInfo.Regs.front());
364 Flag.setRegClass(RC->getID());
365 }
366
367 Inst.addImm(Val: Flag);
368
369 for (Register Reg : OpInfo.Regs) {
370 Inst.addReg(RegNo: Reg,
371 flags: RegState::Define | getImplRegState(B: Reg.isPhysical()) |
372 (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
373 }
374
375 // Remember this output operand for later processing
376 OutputOperands.push_back(Elt: OpInfo);
377 }
378
379 break;
380 case InlineAsm::isInput:
381 case InlineAsm::isLabel: {
382 if (OpInfo.isMatchingInputConstraint()) {
383 unsigned DefIdx = OpInfo.getMatchedOperand();
384 // Find operand with register def that corresponds to DefIdx.
385 unsigned InstFlagIdx = StartIdx;
386 for (unsigned i = 0; i < DefIdx; ++i)
387 InstFlagIdx += getNumOpRegs(I: *Inst, OpIdx: InstFlagIdx) + 1;
388 assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");
389
390 const InlineAsm::Flag MatchedOperandFlag(Inst->getOperand(i: InstFlagIdx).getImm());
391 if (MatchedOperandFlag.isMemKind()) {
392 LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
393 "supported. This should be target specific.\n");
394 return false;
395 }
396 if (!MatchedOperandFlag.isRegDefKind() && !MatchedOperandFlag.isRegDefEarlyClobberKind()) {
397 LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
398 return false;
399 }
400
401 // We want to tie input to register in next operand.
402 unsigned DefRegIdx = InstFlagIdx + 1;
403 Register Def = Inst->getOperand(i: DefRegIdx).getReg();
404
405 ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
406 assert(SrcRegs.size() == 1 && "Single register is expected here");
407
408 // When Def is physreg: use given input.
409 Register In = SrcRegs[0];
410 // When Def is vreg: copy input to new vreg with same reg class as Def.
411 if (Def.isVirtual()) {
412 In = MRI->createVirtualRegister(RegClass: MRI->getRegClass(Reg: Def));
413 if (!buildAnyextOrCopy(Dst: In, Src: SrcRegs[0], MIRBuilder))
414 return false;
415 }
416
417 // Add Flag and input register operand (In) to Inst. Tie In to Def.
418 InlineAsm::Flag UseFlag(InlineAsm::Kind::RegUse, 1);
419 UseFlag.setMatchingOp(DefIdx);
420 Inst.addImm(Val: UseFlag);
421 Inst.addReg(RegNo: In);
422 Inst->tieOperands(DefIdx: DefRegIdx, UseIdx: Inst->getNumOperands() - 1);
423 break;
424 }
425
426 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
427 OpInfo.isIndirect) {
428 LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
429 "not supported yet\n");
430 return false;
431 }
432
433 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
434 OpInfo.ConstraintType == TargetLowering::C_Other) {
435
436 std::vector<MachineOperand> Ops;
437 if (!lowerAsmOperandForConstraint(Val: OpInfo.CallOperandVal,
438 Constraint: OpInfo.ConstraintCode, Ops,
439 MIRBuilder)) {
440 LLVM_DEBUG(dbgs() << "Don't support constraint: "
441 << OpInfo.ConstraintCode << " yet\n");
442 return false;
443 }
444
445 assert(Ops.size() > 0 &&
446 "Expected constraint to be lowered to at least one operand");
447
448 // Add information to the INLINEASM node to know about this input.
449 const unsigned OpFlags =
450 InlineAsm::Flag(InlineAsm::Kind::Imm, Ops.size());
451 Inst.addImm(Val: OpFlags);
452 Inst.add(MOs: Ops);
453 break;
454 }
455
456 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
457
458 if (!OpInfo.isIndirect) {
459 LLVM_DEBUG(dbgs()
460 << "Cannot indirectify memory input operands yet\n");
461 return false;
462 }
463
464 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
465
466 const InlineAsm::ConstraintCode ConstraintID =
467 TLI->getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode);
468 InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
469 OpFlags.setMemConstraint(ConstraintID);
470 Inst.addImm(Val: OpFlags);
471 ArrayRef<Register> SourceRegs =
472 GetOrCreateVRegs(*OpInfo.CallOperandVal);
473 assert(
474 SourceRegs.size() == 1 &&
475 "Expected the memory input to fit into a single virtual register");
476 Inst.addReg(RegNo: SourceRegs[0]);
477 break;
478 }
479
480 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
481 OpInfo.ConstraintType == TargetLowering::C_Register) &&
482 "Unknown constraint type!");
483
484 if (OpInfo.isIndirect) {
485 LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
486 "for constraint '"
487 << OpInfo.ConstraintCode << "'\n");
488 return false;
489 }
490
491 // Copy the input into the appropriate registers.
492 if (OpInfo.Regs.empty()) {
493 LLVM_DEBUG(
494 dbgs()
495 << "Couldn't allocate input register for register constraint\n");
496 return false;
497 }
498
499 unsigned NumRegs = OpInfo.Regs.size();
500 ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
501 assert(NumRegs == SourceRegs.size() &&
502 "Expected the number of input registers to match the number of "
503 "source registers");
504
505 if (NumRegs > 1) {
506 LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
507 "not supported yet\n");
508 return false;
509 }
510
511 InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs);
512 if (OpInfo.Regs.front().isVirtual()) {
513 // Put the register class of the virtual registers in the flag word.
514 const TargetRegisterClass *RC = MRI->getRegClass(Reg: OpInfo.Regs.front());
515 Flag.setRegClass(RC->getID());
516 }
517 Inst.addImm(Val: Flag);
518 if (!buildAnyextOrCopy(Dst: OpInfo.Regs[0], Src: SourceRegs[0], MIRBuilder))
519 return false;
520 Inst.addReg(RegNo: OpInfo.Regs[0]);
521 break;
522 }
523
524 case InlineAsm::isClobber: {
525
526 const unsigned NumRegs = OpInfo.Regs.size();
527 if (NumRegs > 0) {
528 unsigned Flag = InlineAsm::Flag(InlineAsm::Kind::Clobber, NumRegs);
529 Inst.addImm(Val: Flag);
530
531 for (Register Reg : OpInfo.Regs) {
532 Inst.addReg(RegNo: Reg, flags: RegState::Define | RegState::EarlyClobber |
533 getImplRegState(B: Reg.isPhysical()));
534 }
535 }
536 break;
537 }
538 }
539 }
540
541 if (const MDNode *SrcLoc = Call.getMetadata(Kind: "srcloc"))
542 Inst.addMetadata(MD: SrcLoc);
543
544 // All inputs are handled, insert the instruction now
545 MIRBuilder.insertInstr(MIB: Inst);
546
547 // Finally, copy the output operands into the output registers
548 ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
549 if (ResRegs.size() != OutputOperands.size()) {
550 LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
551 "number of destination registers\n");
552 return false;
553 }
554 for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
555 GISelAsmOperandInfo &OpInfo = OutputOperands[i];
556
557 if (OpInfo.Regs.empty())
558 continue;
559
560 switch (OpInfo.ConstraintType) {
561 case TargetLowering::C_Register:
562 case TargetLowering::C_RegisterClass: {
563 if (OpInfo.Regs.size() > 1) {
564 LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
565 "registers are not supported yet\n");
566 return false;
567 }
568
569 Register SrcReg = OpInfo.Regs[0];
570 unsigned SrcSize = TRI->getRegSizeInBits(Reg: SrcReg, MRI: *MRI);
571 LLT ResTy = MRI->getType(Reg: ResRegs[i]);
572 if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
573 // First copy the non-typed virtual register into a generic virtual
574 // register
575 Register Tmp1Reg =
576 MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: SrcSize));
577 MIRBuilder.buildCopy(Res: Tmp1Reg, Op: SrcReg);
578 // Need to truncate the result of the register
579 MIRBuilder.buildTrunc(Res: ResRegs[i], Op: Tmp1Reg);
580 } else if (ResTy.getSizeInBits() == SrcSize) {
581 MIRBuilder.buildCopy(Res: ResRegs[i], Op: SrcReg);
582 } else {
583 LLVM_DEBUG(dbgs() << "Unhandled output operand with "
584 "mismatched register size\n");
585 return false;
586 }
587
588 break;
589 }
590 case TargetLowering::C_Immediate:
591 case TargetLowering::C_Other:
592 LLVM_DEBUG(
593 dbgs() << "Cannot lower target specific output constraints yet\n");
594 return false;
595 case TargetLowering::C_Memory:
596 break; // Already handled.
597 case TargetLowering::C_Address:
598 break; // Silence warning.
599 case TargetLowering::C_Unknown:
600 LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
601 return false;
602 }
603 }
604
605 return true;
606}
607
608bool InlineAsmLowering::lowerAsmOperandForConstraint(
609 Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
610 MachineIRBuilder &MIRBuilder) const {
611 if (Constraint.size() > 1)
612 return false;
613
614 char ConstraintLetter = Constraint[0];
615 switch (ConstraintLetter) {
616 default:
617 return false;
618 case 'i': // Simple Integer or Relocatable Constant
619 case 'n': // immediate integer with a known value.
620 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
621 assert(CI->getBitWidth() <= 64 &&
622 "expected immediate to fit into 64-bits");
623 // Boolean constants should be zero-extended, others are sign-extended
624 bool IsBool = CI->getBitWidth() == 1;
625 int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
626 Ops.push_back(x: MachineOperand::CreateImm(Val: ExtVal));
627 return true;
628 }
629 return false;
630 }
631}
632

// End of llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp