1 | //===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// \file |
9 | /// This file declares the IRTranslator pass. |
10 | /// This pass is responsible for translating LLVM IR into MachineInstr. |
11 | /// It uses target hooks to lower the ABI but aside from that, the pass |
12 | /// generated code is generic. This is the default translator used for |
13 | /// GlobalISel. |
14 | /// |
15 | /// \todo Replace the comments with actual doxygen comments. |
16 | //===----------------------------------------------------------------------===// |
17 | |
18 | #ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H |
19 | #define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H |
20 | |
21 | #include "llvm/ADT/DenseMap.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/CodeGen/CodeGenCommonISel.h" |
24 | #include "llvm/CodeGen/FunctionLoweringInfo.h" |
25 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
26 | #include "llvm/CodeGen/MachineFunctionPass.h" |
27 | #include "llvm/CodeGen/SwiftErrorValueTracking.h" |
28 | #include "llvm/CodeGen/SwitchLoweringUtils.h" |
29 | #include "llvm/Support/Allocator.h" |
30 | #include "llvm/Support/CodeGen.h" |
31 | #include <memory> |
32 | #include <utility> |
33 | |
34 | namespace llvm { |
35 | |
// Forward declarations for types only used by reference/pointer below.
class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgDeclareInst;
class DbgValueInst;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;
57 | |
58 | // Technically the pass should run on an hypothetical MachineModule, |
59 | // since it should translate Global into some sort of MachineGlobal. |
60 | // The MachineGlobal should ultimately just be a transfer of ownership of |
61 | // the interesting bits that are relevant to represent a global value. |
62 | // That being said, we could investigate what would it cost to just duplicate |
63 | // the information from the LLVM IR. |
64 | // The idea is that ultimately we would be able to free up the memory used |
65 | // by the LLVM IR as soon as the translation is over. |
66 | class IRTranslator : public MachineFunctionPass { |
67 | public: |
68 | static char ID; |
69 | |
70 | private: |
71 | /// Interface used to lower the everything related to calls. |
72 | const CallLowering *CLI = nullptr; |
73 | |
74 | /// This class contains the mapping between the Values to vreg related data. |
75 | class ValueToVRegInfo { |
76 | public: |
77 | ValueToVRegInfo() = default; |
78 | |
79 | using VRegListT = SmallVector<Register, 1>; |
80 | using OffsetListT = SmallVector<uint64_t, 1>; |
81 | |
82 | using const_vreg_iterator = |
83 | DenseMap<const Value *, VRegListT *>::const_iterator; |
84 | using const_offset_iterator = |
85 | DenseMap<const Value *, OffsetListT *>::const_iterator; |
86 | |
87 | inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); } |
88 | |
89 | VRegListT *getVRegs(const Value &V) { |
90 | auto It = ValToVRegs.find(Val: &V); |
91 | if (It != ValToVRegs.end()) |
92 | return It->second; |
93 | |
94 | return insertVRegs(V); |
95 | } |
96 | |
97 | OffsetListT *getOffsets(const Value &V) { |
98 | auto It = TypeToOffsets.find(Val: V.getType()); |
99 | if (It != TypeToOffsets.end()) |
100 | return It->second; |
101 | |
102 | return insertOffsets(V); |
103 | } |
104 | |
105 | const_vreg_iterator findVRegs(const Value &V) const { |
106 | return ValToVRegs.find(Val: &V); |
107 | } |
108 | |
109 | bool contains(const Value &V) const { return ValToVRegs.contains(Val: &V); } |
110 | |
111 | void reset() { |
112 | ValToVRegs.clear(); |
113 | TypeToOffsets.clear(); |
114 | VRegAlloc.DestroyAll(); |
115 | OffsetAlloc.DestroyAll(); |
116 | } |
117 | |
118 | private: |
119 | VRegListT *insertVRegs(const Value &V) { |
120 | assert(!ValToVRegs.contains(&V) && "Value already exists" ); |
121 | |
122 | // We placement new using our fast allocator since we never try to free |
123 | // the vectors until translation is finished. |
124 | auto *VRegList = new (VRegAlloc.Allocate()) VRegListT(); |
125 | ValToVRegs[&V] = VRegList; |
126 | return VRegList; |
127 | } |
128 | |
129 | OffsetListT *insertOffsets(const Value &V) { |
130 | assert(!TypeToOffsets.contains(V.getType()) && "Type already exists" ); |
131 | |
132 | auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT(); |
133 | TypeToOffsets[V.getType()] = OffsetList; |
134 | return OffsetList; |
135 | } |
136 | SpecificBumpPtrAllocator<VRegListT> VRegAlloc; |
137 | SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc; |
138 | |
139 | // We store pointers to vectors here since references may be invalidated |
140 | // while we hold them if we stored the vectors directly. |
141 | DenseMap<const Value *, VRegListT*> ValToVRegs; |
142 | DenseMap<const Type *, OffsetListT*> TypeToOffsets; |
143 | }; |
144 | |
145 | /// Mapping of the values of the current LLVM IR function to the related |
146 | /// virtual registers and offsets. |
147 | ValueToVRegInfo VMap; |
148 | |
149 | // N.b. it's not completely obvious that this will be sufficient for every |
150 | // LLVM IR construct (with "invoke" being the obvious candidate to mess up our |
151 | // lives. |
152 | DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB; |
153 | |
154 | // One BasicBlock can be translated to multiple MachineBasicBlocks. For such |
155 | // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains |
156 | // a mapping between the edges arriving at the BasicBlock to the corresponding |
157 | // created MachineBasicBlocks. Some BasicBlocks that get translated to a |
158 | // single MachineBasicBlock may also end up in this Map. |
159 | using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>; |
160 | DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds; |
161 | |
162 | // List of stubbed PHI instructions, for values and basic blocks to be filled |
163 | // in once all MachineBasicBlocks have been created. |
164 | SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4> |
165 | PendingPHIs; |
166 | |
167 | /// Record of what frame index has been allocated to specified allocas for |
168 | /// this function. |
169 | DenseMap<const AllocaInst *, int> FrameIndices; |
170 | |
171 | SwiftErrorValueTracking SwiftError; |
172 | |
173 | /// \name Methods for translating form LLVM IR to MachineInstr. |
174 | /// \see ::translate for general information on the translate methods. |
175 | /// @{ |
176 | |
177 | /// Translate \p Inst into its corresponding MachineInstr instruction(s). |
178 | /// Insert the newly translated instruction(s) right where the CurBuilder |
179 | /// is set. |
180 | /// |
181 | /// The general algorithm is: |
182 | /// 1. Look for a virtual register for each operand or |
183 | /// create one. |
184 | /// 2 Update the VMap accordingly. |
185 | /// 2.alt. For constant arguments, if they are compile time constants, |
186 | /// produce an immediate in the right operand and do not touch |
187 | /// ValToReg. Actually we will go with a virtual register for each |
188 | /// constants because it may be expensive to actually materialize the |
189 | /// constant. Moreover, if the constant spans on several instructions, |
190 | /// CSE may not catch them. |
191 | /// => Update ValToVReg and remember that we saw a constant in Constants. |
192 | /// We will materialize all the constants in finalize. |
193 | /// Note: we would need to do something so that we can recognize such operand |
194 | /// as constants. |
195 | /// 3. Create the generic instruction. |
196 | /// |
197 | /// \return true if the translation succeeded. |
198 | bool translate(const Instruction &Inst); |
199 | |
200 | /// Materialize \p C into virtual-register \p Reg. The generic instructions |
201 | /// performing this materialization will be inserted into the entry block of |
202 | /// the function. |
203 | /// |
204 | /// \return true if the materialization succeeded. |
205 | bool translate(const Constant &C, Register Reg); |
206 | |
207 | /// Examine any debug-info attached to the instruction (in the form of |
208 | /// DPValues) and translate it. |
209 | void translateDbgInfo(const Instruction &Inst, |
210 | MachineIRBuilder &MIRBuilder); |
211 | |
212 | /// Translate a debug-info record of a dbg.value into a DBG_* instruction. |
213 | /// Pass in all the contents of the record, rather than relying on how it's |
214 | /// stored. |
215 | void translateDbgValueRecord(Value *V, bool HasArgList, |
216 | const DILocalVariable *Variable, |
217 | const DIExpression *Expression, const DebugLoc &DL, |
218 | MachineIRBuilder &MIRBuilder); |
219 | |
220 | /// Translate a debug-info record of a dbg.declare into an indirect DBG_* |
221 | /// instruction. Pass in all the contents of the record, rather than relying |
222 | /// on how it's stored. |
223 | void translateDbgDeclareRecord(Value *Address, bool HasArgList, |
224 | const DILocalVariable *Variable, |
225 | const DIExpression *Expression, const DebugLoc &DL, |
226 | MachineIRBuilder &MIRBuilder); |
227 | |
228 | // Translate U as a copy of V. |
229 | bool translateCopy(const User &U, const Value &V, |
230 | MachineIRBuilder &MIRBuilder); |
231 | |
232 | /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is |
233 | /// emitted. |
234 | bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder); |
235 | |
236 | /// Translate an LLVM load instruction into generic IR. |
237 | bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder); |
238 | |
239 | /// Translate an LLVM store instruction into generic IR. |
240 | bool translateStore(const User &U, MachineIRBuilder &MIRBuilder); |
241 | |
242 | /// Translate an LLVM string intrinsic (memcpy, memset, ...). |
243 | bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder, |
244 | unsigned Opcode); |
245 | |
246 | void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder); |
247 | |
248 | bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op, |
249 | MachineIRBuilder &MIRBuilder); |
250 | bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, |
251 | MachineIRBuilder &MIRBuilder); |
252 | |
253 | /// Helper function for translateSimpleIntrinsic. |
254 | /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a |
255 | /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns |
256 | /// Intrinsic::not_intrinsic. |
257 | unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID); |
258 | |
259 | /// Translates the intrinsics defined in getSimpleIntrinsicOpcode. |
260 | /// \return true if the translation succeeded. |
261 | bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID, |
262 | MachineIRBuilder &MIRBuilder); |
263 | |
264 | bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI, |
265 | MachineIRBuilder &MIRBuilder); |
266 | |
267 | bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, |
268 | MachineIRBuilder &MIRBuilder); |
269 | |
270 | /// Returns the single livein physical register Arg was lowered to, if |
271 | /// possible. |
272 | std::optional<MCRegister> getArgPhysReg(Argument &Arg); |
273 | |
274 | /// If debug-info targets an Argument and its expression is an EntryValue, |
275 | /// lower it as either an entry in the MF debug table (dbg.declare), or a |
276 | /// DBG_VALUE targeting the corresponding livein register for that Argument |
277 | /// (dbg.value). |
278 | bool translateIfEntryValueArgument(bool isDeclare, Value *Arg, |
279 | const DILocalVariable *Var, |
280 | const DIExpression *Expr, |
281 | const DebugLoc &DL, |
282 | MachineIRBuilder &MIRBuilder); |
283 | |
284 | bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder); |
285 | |
286 | /// Common code for translating normal calls or invokes. |
287 | bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder); |
288 | |
289 | /// Translate call instruction. |
290 | /// \pre \p U is a call instruction. |
291 | bool translateCall(const User &U, MachineIRBuilder &MIRBuilder); |
292 | |
293 | /// When an invoke or a cleanupret unwinds to the next EH pad, there are |
294 | /// many places it could ultimately go. In the IR, we have a single unwind |
295 | /// destination, but in the machine CFG, we enumerate all the possible blocks. |
296 | /// This function skips over imaginary basic blocks that hold catchswitch |
297 | /// instructions, and finds all the "real" machine |
298 | /// basic block destinations. As those destinations may not be successors of |
299 | /// EHPadBB, here we also calculate the edge probability to those |
300 | /// destinations. The passed-in Prob is the edge probability to EHPadBB. |
301 | bool findUnwindDestinations( |
302 | const BasicBlock *EHPadBB, BranchProbability Prob, |
303 | SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> |
304 | &UnwindDests); |
305 | |
306 | bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder); |
307 | |
308 | bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder); |
309 | |
310 | bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder); |
311 | |
312 | /// Translate one of LLVM's cast instructions into MachineInstrs, with the |
313 | /// given generic Opcode. |
314 | bool translateCast(unsigned Opcode, const User &U, |
315 | MachineIRBuilder &MIRBuilder); |
316 | |
317 | /// Translate a phi instruction. |
318 | bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder); |
319 | |
320 | /// Translate a comparison (icmp or fcmp) instruction or constant. |
321 | bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder); |
322 | |
323 | /// Translate an integer compare instruction (or constant). |
324 | bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) { |
325 | return translateCompare(U, MIRBuilder); |
326 | } |
327 | |
328 | /// Translate a floating-point compare instruction (or constant). |
329 | bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) { |
330 | return translateCompare(U, MIRBuilder); |
331 | } |
332 | |
333 | /// Add remaining operands onto phis we've translated. Executed after all |
334 | /// MachineBasicBlocks for the function have been created. |
335 | void finishPendingPhis(); |
336 | |
337 | /// Translate \p Inst into a unary operation \p Opcode. |
338 | /// \pre \p U is a unary operation. |
339 | bool translateUnaryOp(unsigned Opcode, const User &U, |
340 | MachineIRBuilder &MIRBuilder); |
341 | |
342 | /// Translate \p Inst into a binary operation \p Opcode. |
343 | /// \pre \p U is a binary operation. |
344 | bool translateBinaryOp(unsigned Opcode, const User &U, |
345 | MachineIRBuilder &MIRBuilder); |
346 | |
347 | /// If the set of cases should be emitted as a series of branches, return |
348 | /// true. If we should emit this as a bunch of and/or'd together conditions, |
349 | /// return false. |
350 | bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases); |
351 | /// Helper method for findMergedConditions. |
352 | /// This function emits a branch and is used at the leaves of an OR or an |
353 | /// AND operator tree. |
354 | void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, |
355 | MachineBasicBlock *FBB, |
356 | MachineBasicBlock *CurBB, |
357 | MachineBasicBlock *SwitchBB, |
358 | BranchProbability TProb, |
359 | BranchProbability FProb, bool InvertCond); |
360 | /// Used during condbr translation to find trees of conditions that can be |
361 | /// optimized. |
362 | void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB, |
363 | MachineBasicBlock *FBB, MachineBasicBlock *CurBB, |
364 | MachineBasicBlock *SwitchBB, |
365 | Instruction::BinaryOps Opc, BranchProbability TProb, |
366 | BranchProbability FProb, bool InvertCond); |
367 | |
368 | /// Translate branch (br) instruction. |
369 | /// \pre \p U is a branch instruction. |
370 | bool translateBr(const User &U, MachineIRBuilder &MIRBuilder); |
371 | |
372 | // Begin switch lowering functions. |
373 | bool (SwitchCG::JumpTable &JT, |
374 | SwitchCG::JumpTableHeader &JTH, |
375 | MachineBasicBlock *); |
376 | void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB); |
377 | |
378 | void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB, |
379 | MachineIRBuilder &MIB); |
380 | |
381 | /// Generate for the BitTest header block, which precedes each sequence of |
382 | /// BitTestCases. |
383 | void (SwitchCG::BitTestBlock &BTB, |
384 | MachineBasicBlock *SwitchMBB); |
385 | /// Generate code to produces one "bit test" for a given BitTestCase \p B. |
386 | void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, |
387 | BranchProbability BranchProbToNext, Register Reg, |
388 | SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB); |
389 | |
390 | void splitWorkItem(SwitchCG::SwitchWorkList &WorkList, |
391 | const SwitchCG::SwitchWorkListItem &W, Value *Cond, |
392 | MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB); |
393 | |
394 | bool lowerJumpTableWorkItem( |
395 | SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB, |
396 | MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB, |
397 | MachineIRBuilder &MIB, MachineFunction::iterator BBI, |
398 | BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I, |
399 | MachineBasicBlock *Fallthrough, bool FallthroughUnreachable); |
400 | |
401 | bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond, |
402 | MachineBasicBlock *Fallthrough, |
403 | bool FallthroughUnreachable, |
404 | BranchProbability UnhandledProbs, |
405 | MachineBasicBlock *CurMBB, |
406 | MachineIRBuilder &MIB, |
407 | MachineBasicBlock *SwitchMBB); |
408 | |
409 | bool lowerBitTestWorkItem( |
410 | SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB, |
411 | MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB, |
412 | MachineIRBuilder &MIB, MachineFunction::iterator BBI, |
413 | BranchProbability DefaultProb, BranchProbability UnhandledProbs, |
414 | SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough, |
415 | bool FallthroughUnreachable); |
416 | |
417 | bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond, |
418 | MachineBasicBlock *SwitchMBB, |
419 | MachineBasicBlock *DefaultMBB, |
420 | MachineIRBuilder &MIB); |
421 | |
422 | bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder); |
423 | // End switch lowering section. |
424 | |
425 | bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder); |
426 | |
427 | bool (const User &U, MachineIRBuilder &MIRBuilder); |
428 | |
429 | bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder); |
430 | |
431 | bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder); |
432 | |
433 | bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder); |
434 | |
435 | bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder); |
436 | |
437 | /// Translate return (ret) instruction. |
438 | /// The target needs to implement CallLowering::lowerReturn for |
439 | /// this to succeed. |
440 | /// \pre \p U is a return instruction. |
441 | bool translateRet(const User &U, MachineIRBuilder &MIRBuilder); |
442 | |
443 | bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder); |
444 | |
445 | bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) { |
446 | return translateBinaryOp(Opcode: TargetOpcode::G_ADD, U, MIRBuilder); |
447 | } |
448 | bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) { |
449 | return translateBinaryOp(Opcode: TargetOpcode::G_SUB, U, MIRBuilder); |
450 | } |
451 | bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) { |
452 | return translateBinaryOp(Opcode: TargetOpcode::G_AND, U, MIRBuilder); |
453 | } |
454 | bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) { |
455 | return translateBinaryOp(Opcode: TargetOpcode::G_MUL, U, MIRBuilder); |
456 | } |
457 | bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) { |
458 | return translateBinaryOp(Opcode: TargetOpcode::G_OR, U, MIRBuilder); |
459 | } |
460 | bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) { |
461 | return translateBinaryOp(Opcode: TargetOpcode::G_XOR, U, MIRBuilder); |
462 | } |
463 | |
464 | bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) { |
465 | return translateBinaryOp(Opcode: TargetOpcode::G_UDIV, U, MIRBuilder); |
466 | } |
467 | bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) { |
468 | return translateBinaryOp(Opcode: TargetOpcode::G_SDIV, U, MIRBuilder); |
469 | } |
470 | bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) { |
471 | return translateBinaryOp(Opcode: TargetOpcode::G_UREM, U, MIRBuilder); |
472 | } |
473 | bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) { |
474 | return translateBinaryOp(Opcode: TargetOpcode::G_SREM, U, MIRBuilder); |
475 | } |
476 | bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) { |
477 | return translateCast(Opcode: TargetOpcode::G_INTTOPTR, U, MIRBuilder); |
478 | } |
479 | bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) { |
480 | return translateCast(Opcode: TargetOpcode::G_PTRTOINT, U, MIRBuilder); |
481 | } |
482 | bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) { |
483 | return translateCast(Opcode: TargetOpcode::G_TRUNC, U, MIRBuilder); |
484 | } |
485 | bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) { |
486 | return translateCast(Opcode: TargetOpcode::G_FPTRUNC, U, MIRBuilder); |
487 | } |
488 | bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) { |
489 | return translateCast(Opcode: TargetOpcode::G_FPEXT, U, MIRBuilder); |
490 | } |
491 | bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) { |
492 | return translateCast(Opcode: TargetOpcode::G_FPTOUI, U, MIRBuilder); |
493 | } |
494 | bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) { |
495 | return translateCast(Opcode: TargetOpcode::G_FPTOSI, U, MIRBuilder); |
496 | } |
497 | bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) { |
498 | return translateCast(Opcode: TargetOpcode::G_UITOFP, U, MIRBuilder); |
499 | } |
500 | bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) { |
501 | return translateCast(Opcode: TargetOpcode::G_SITOFP, U, MIRBuilder); |
502 | } |
503 | bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder); |
504 | |
505 | bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) { |
506 | return translateCast(Opcode: TargetOpcode::G_SEXT, U, MIRBuilder); |
507 | } |
508 | |
509 | bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) { |
510 | return translateCast(Opcode: TargetOpcode::G_ZEXT, U, MIRBuilder); |
511 | } |
512 | |
513 | bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) { |
514 | return translateBinaryOp(Opcode: TargetOpcode::G_SHL, U, MIRBuilder); |
515 | } |
516 | bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) { |
517 | return translateBinaryOp(Opcode: TargetOpcode::G_LSHR, U, MIRBuilder); |
518 | } |
519 | bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) { |
520 | return translateBinaryOp(Opcode: TargetOpcode::G_ASHR, U, MIRBuilder); |
521 | } |
522 | |
523 | bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) { |
524 | return translateBinaryOp(Opcode: TargetOpcode::G_FADD, U, MIRBuilder); |
525 | } |
526 | bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { |
527 | return translateBinaryOp(Opcode: TargetOpcode::G_FSUB, U, MIRBuilder); |
528 | } |
529 | bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) { |
530 | return translateBinaryOp(Opcode: TargetOpcode::G_FMUL, U, MIRBuilder); |
531 | } |
532 | bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) { |
533 | return translateBinaryOp(Opcode: TargetOpcode::G_FDIV, U, MIRBuilder); |
534 | } |
535 | bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) { |
536 | return translateBinaryOp(Opcode: TargetOpcode::G_FREM, U, MIRBuilder); |
537 | } |
538 | |
539 | bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder); |
540 | |
541 | bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder); |
542 | |
543 | bool (const User &U, MachineIRBuilder &MIRBuilder); |
544 | |
545 | bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder); |
546 | |
547 | bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder); |
548 | bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder); |
549 | bool translateFence(const User &U, MachineIRBuilder &MIRBuilder); |
550 | bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder); |
551 | |
552 | // Stubs to keep the compiler happy while we implement the rest of the |
553 | // translation. |
554 | bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) { |
555 | return false; |
556 | } |
557 | bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) { |
558 | return false; |
559 | } |
560 | bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) { |
561 | return false; |
562 | } |
563 | bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) { |
564 | return false; |
565 | } |
566 | bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) { |
567 | return translateCast(Opcode: TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder); |
568 | } |
569 | bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) { |
570 | return false; |
571 | } |
572 | bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) { |
573 | return false; |
574 | } |
575 | bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) { |
576 | return false; |
577 | } |
578 | bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) { |
579 | return false; |
580 | } |
581 | |
582 | /// @} |
583 | |
584 | // Builder for machine instruction a la IRBuilder. |
585 | // I.e., compared to regular MIBuilder, this one also inserts the instruction |
586 | // in the current block, it can creates block, etc., basically a kind of |
587 | // IRBuilder, but for Machine IR. |
588 | // CSEMIRBuilder CurBuilder; |
589 | std::unique_ptr<MachineIRBuilder> CurBuilder; |
590 | |
591 | // Builder set to the entry block (just after ABI lowering instructions). Used |
592 | // as a convenient location for Constants. |
593 | // CSEMIRBuilder EntryBuilder; |
594 | std::unique_ptr<MachineIRBuilder> EntryBuilder; |
595 | |
596 | // The MachineFunction currently being translated. |
597 | MachineFunction *MF = nullptr; |
598 | |
599 | /// MachineRegisterInfo used to create virtual registers. |
600 | MachineRegisterInfo *MRI = nullptr; |
601 | |
602 | const DataLayout *DL = nullptr; |
603 | |
604 | /// Current target configuration. Controls how the pass handles errors. |
605 | const TargetPassConfig *TPC = nullptr; |
606 | |
607 | CodeGenOptLevel OptLevel; |
608 | |
609 | /// Current optimization remark emitter. Used to report failures. |
610 | std::unique_ptr<OptimizationRemarkEmitter> ORE; |
611 | |
612 | AAResults *AA = nullptr; |
613 | AssumptionCache *AC = nullptr; |
614 | const TargetLibraryInfo *LibInfo = nullptr; |
615 | FunctionLoweringInfo FuncInfo; |
616 | |
617 | // True when either the Target Machine specifies no optimizations or the |
618 | // function has the optnone attribute. |
619 | bool EnableOpts = false; |
620 | |
621 | /// True when the block contains a tail call. This allows the IRTranslator to |
622 | /// stop translating such blocks early. |
623 | bool HasTailCall = false; |
624 | |
625 | StackProtectorDescriptor SPDescriptor; |
626 | |
627 | /// Switch analysis and optimization. |
628 | class GISelSwitchLowering : public SwitchCG::SwitchLowering { |
629 | public: |
630 | GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo) |
631 | : SwitchLowering(funcinfo), IRT(irt) { |
632 | assert(irt && "irt is null!" ); |
633 | } |
634 | |
635 | void addSuccessorWithProb( |
636 | MachineBasicBlock *Src, MachineBasicBlock *Dst, |
637 | BranchProbability Prob = BranchProbability::getUnknown()) override { |
638 | IRT->addSuccessorWithProb(Src, Dst, Prob); |
639 | } |
640 | |
641 | virtual ~GISelSwitchLowering() = default; |
642 | |
643 | private: |
644 | IRTranslator *IRT; |
645 | }; |
646 | |
647 | std::unique_ptr<GISelSwitchLowering> SL; |
648 | |
649 | // * Insert all the code needed to materialize the constants |
650 | // at the proper place. E.g., Entry block or dominator block |
651 | // of each constant depending on how fancy we want to be. |
652 | // * Clear the different maps. |
653 | void finalizeFunction(); |
654 | |
655 | // Processing steps done per block. E.g. emitting jump tables, stack |
656 | // protectors etc. Returns true if no errors, false if there was a problem |
657 | // that caused an abort. |
658 | bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB); |
659 | |
660 | /// Codegen a new tail for a stack protector check ParentMBB which has had its |
661 | /// tail spliced into a stack protector check success bb. |
662 | /// |
663 | /// For a high level explanation of how this fits into the stack protector |
664 | /// generation see the comment on the declaration of class |
665 | /// StackProtectorDescriptor. |
666 | /// |
667 | /// \return true if there were no problems. |
668 | bool emitSPDescriptorParent(StackProtectorDescriptor &SPD, |
669 | MachineBasicBlock *ParentBB); |
670 | |
671 | /// Codegen the failure basic block for a stack protector check. |
672 | /// |
673 | /// A failure stack protector machine basic block consists simply of a call to |
674 | /// __stack_chk_fail(). |
675 | /// |
676 | /// For a high level explanation of how this fits into the stack protector |
677 | /// generation see the comment on the declaration of class |
678 | /// StackProtectorDescriptor. |
679 | /// |
680 | /// \return true if there were no problems. |
681 | bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD, |
682 | MachineBasicBlock *FailureBB); |
683 | |
684 | /// Get the VRegs that represent \p Val. |
685 | /// Non-aggregate types have just one corresponding VReg and the list can be |
686 | /// used as a single "unsigned". Aggregates get flattened. If such VRegs do |
687 | /// not exist, they are created. |
688 | ArrayRef<Register> getOrCreateVRegs(const Value &Val); |
689 | |
690 | Register getOrCreateVReg(const Value &Val) { |
691 | auto Regs = getOrCreateVRegs(Val); |
692 | if (Regs.empty()) |
693 | return 0; |
694 | assert(Regs.size() == 1 && |
695 | "attempt to get single VReg for aggregate or void" ); |
696 | return Regs[0]; |
697 | } |
698 | |
699 | /// Allocate some vregs and offsets in the VMap. Then populate just the |
700 | /// offsets while leaving the vregs empty. |
701 | ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val); |
702 | |
703 | /// Get the frame index that represents \p Val. |
704 | /// If such VReg does not exist, it is created. |
705 | int getOrCreateFrameIndex(const AllocaInst &AI); |
706 | |
707 | /// Get the alignment of the given memory operation instruction. This will |
708 | /// either be the explicitly specified value or the ABI-required alignment for |
709 | /// the type being accessed (according to the Module's DataLayout). |
710 | Align getMemOpAlign(const Instruction &I); |
711 | |
712 | /// Get the MachineBasicBlock that represents \p BB. Specifically, the block |
713 | /// returned will be the head of the translated block (suitable for branch |
714 | /// destinations). |
715 | MachineBasicBlock &getMBB(const BasicBlock &BB); |
716 | |
717 | /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding |
718 | /// to `Edge.first` at the IR level. This is used when IRTranslation creates |
719 | /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer |
720 | /// represented simply by the IR-level CFG. |
721 | void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred); |
722 | |
723 | /// Returns the Machine IR predecessors for the given IR CFG edge. Usually |
724 | /// this is just the single MachineBasicBlock corresponding to the predecessor |
725 | /// in the IR. More complex lowering can result in multiple MachineBasicBlocks |
726 | /// preceding the original though (e.g. switch instructions). |
727 | SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) { |
728 | auto RemappedEdge = MachinePreds.find(Val: Edge); |
729 | if (RemappedEdge != MachinePreds.end()) |
730 | return RemappedEdge->second; |
731 | return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(BB: *Edge.first)); |
732 | } |
733 | |
734 | /// Return branch probability calculated by BranchProbabilityInfo for IR |
735 | /// blocks. |
736 | BranchProbability getEdgeProbability(const MachineBasicBlock *Src, |
737 | const MachineBasicBlock *Dst) const; |
738 | |
739 | void addSuccessorWithProb( |
740 | MachineBasicBlock *Src, MachineBasicBlock *Dst, |
741 | BranchProbability Prob = BranchProbability::getUnknown()); |
742 | |
743 | public: |
744 | IRTranslator(CodeGenOptLevel OptLevel = CodeGenOptLevel::None); |
745 | |
746 | StringRef getPassName() const override { return "IRTranslator" ; } |
747 | |
748 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
749 | |
750 | // Algo: |
751 | // CallLowering = MF.subtarget.getCallLowering() |
752 | // F = MF.getParent() |
753 | // MIRBuilder.reset(MF) |
754 | // getMBB(F.getEntryBB()) |
755 | // CallLowering->translateArguments(MIRBuilder, F, ValToVReg) |
756 | // for each bb in F |
757 | // getMBB(bb) |
758 | // for each inst in bb |
759 | // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence)) |
760 | // report_fatal_error("Don't know how to translate input"); |
761 | // finalize() |
762 | bool runOnMachineFunction(MachineFunction &MF) override; |
763 | }; |
764 | |
765 | } // end namespace llvm |
766 | |
767 | #endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H |
768 | |