1 | //===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the ARM specific subclass of TargetSubtargetInfo. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "ARM.h" |
14 | |
15 | #include "ARMCallLowering.h" |
16 | #include "ARMFrameLowering.h" |
17 | #include "ARMInstrInfo.h" |
18 | #include "ARMLegalizerInfo.h" |
19 | #include "ARMRegisterBankInfo.h" |
20 | #include "ARMSubtarget.h" |
21 | #include "ARMTargetMachine.h" |
22 | #include "MCTargetDesc/ARMMCTargetDesc.h" |
23 | #include "Thumb1FrameLowering.h" |
24 | #include "Thumb1InstrInfo.h" |
25 | #include "Thumb2InstrInfo.h" |
26 | #include "llvm/ADT/StringRef.h" |
27 | #include "llvm/ADT/Twine.h" |
28 | #include "llvm/CodeGen/GlobalISel/InstructionSelect.h" |
29 | #include "llvm/CodeGen/MachineFrameInfo.h" |
30 | #include "llvm/CodeGen/MachineFunction.h" |
31 | #include "llvm/IR/Function.h" |
32 | #include "llvm/IR/GlobalValue.h" |
33 | #include "llvm/MC/MCAsmInfo.h" |
34 | #include "llvm/MC/MCTargetOptions.h" |
35 | #include "llvm/Support/CodeGen.h" |
36 | #include "llvm/Support/CommandLine.h" |
37 | #include "llvm/Target/TargetOptions.h" |
38 | #include "llvm/TargetParser/ARMTargetParser.h" |
39 | #include "llvm/TargetParser/Triple.h" |
40 | |
41 | using namespace llvm; |
42 | |
43 | #define DEBUG_TYPE "arm-subtarget" |
44 | |
45 | #define GET_SUBTARGETINFO_TARGET_DESC |
46 | #define GET_SUBTARGETINFO_CTOR |
47 | #include "ARMGenSubtargetInfo.inc" |
48 | |
49 | static cl::opt<bool> |
50 | UseFusedMulOps("arm-use-mulops" , |
51 | cl::init(true), cl::Hidden); |
52 | |
53 | enum ITMode { |
54 | DefaultIT, |
55 | RestrictedIT |
56 | }; |
57 | |
58 | static cl::opt<ITMode> |
59 | IT(cl::desc("IT block support" ), cl::Hidden, cl::init(Val: DefaultIT), |
60 | cl::values(clEnumValN(DefaultIT, "arm-default-it" , |
61 | "Generate any type of IT block" ), |
62 | clEnumValN(RestrictedIT, "arm-restrict-it" , |
63 | "Disallow complex IT blocks" ))); |
64 | |
65 | /// ForceFastISel - Use the fast-isel, even for subtargets where it is not |
66 | /// currently supported (for testing only). |
67 | static cl::opt<bool> |
68 | ForceFastISel("arm-force-fast-isel" , |
69 | cl::init(Val: false), cl::Hidden); |
70 | |
71 | static cl::opt<bool> EnableSubRegLiveness("arm-enable-subreg-liveness" , |
72 | cl::init(Val: false), cl::Hidden); |
73 | |
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
/// Resets the per-environment flags first, then parses CPU/FS into the
/// subtarget feature bits; returns *this so it can be chained inside a
/// member initializer (see the FrameLowering initializer in the constructor).
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  return *this;
}
82 | |
83 | ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU, |
84 | StringRef FS) { |
85 | ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS); |
86 | if (STI.isThumb1Only()) |
87 | return (ARMFrameLowering *)new Thumb1FrameLowering(STI); |
88 | |
89 | return new ARMFrameLowering(STI); |
90 | } |
91 | |
/// Construct the subtarget for the given triple/CPU/feature strings.
///
/// The member-initializer order is load-bearing: the FrameLowering
/// initializer runs initializeFrameLowering, which in turn runs
/// initializeSubtargetDependencies (parsing the feature bits), so predicates
/// such as isThumb1Only()/isThumb() are valid for the InstrInfo initializer
/// that follows.
ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize),
      IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                    : !isThumb()
                          ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                          : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {

  // Set up the GlobalISel components; they are owned by this subtarget and
  // exposed through the get*() accessors below.
  CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createARMInstructionSelector(
      *static_cast<const ARMBaseTargetMachine *>(&TM), *this, *RBI));

  // Transfer ownership of RBI to the subtarget only after the selector has
  // been built from the raw pointer above.
  RegBankInfo.reset(RBI);
}
122 | |
/// Return the GlobalISel call lowering implementation (an ARMCallLowering,
/// created in the constructor).
const CallLowering *ARMSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}
126 | |
/// Return the GlobalISel instruction selector created in the constructor.
InstructionSelector *ARMSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}
130 | |
/// Return the GlobalISel legalizer info (an ARMLegalizerInfo, created in the
/// constructor).
const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}
134 | |
/// Return the GlobalISel register bank info (an ARMRegisterBankInfo, created
/// in the constructor).
const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}
138 | |
/// XRay instrumentation is only supported on ARM-mode cores with at least
/// ARMv6, and not on Windows.
bool ARMSubtarget::isXRaySupported() const {
  // We don't currently support Thumb, but Windows requires Thumb.
  return hasV6Ops() && hasARMOps() && !isTargetWindows();
}
143 | |
144 | void ARMSubtarget::initializeEnvironment() { |
145 | // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this |
146 | // directly from it, but we can try to make sure they're consistent when both |
147 | // available. |
148 | UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() && |
149 | Options.ExceptionModel == ExceptionHandling::None) || |
150 | Options.ExceptionModel == ExceptionHandling::SjLj; |
151 | assert((!TM.getMCAsmInfo() || |
152 | (TM.getMCAsmInfo()->getExceptionHandlingType() == |
153 | ExceptionHandling::SjLj) == UseSjLjEH) && |
154 | "inconsistent sjlj choice between CodeGen and MC" ); |
155 | } |
156 | |
157 | void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) { |
158 | if (CPUString.empty()) { |
159 | CPUString = "generic" ; |
160 | |
161 | if (isTargetDarwin()) { |
162 | StringRef ArchName = TargetTriple.getArchName(); |
163 | ARM::ArchKind AK = ARM::parseArch(Arch: ArchName); |
164 | if (AK == ARM::ArchKind::ARMV7S) |
165 | // Default to the Swift CPU when targeting armv7s/thumbv7s. |
166 | CPUString = "swift" ; |
167 | else if (AK == ARM::ArchKind::ARMV7K) |
168 | // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k. |
169 | // ARMv7k does not use SjLj exception handling. |
170 | CPUString = "cortex-a7" ; |
171 | } |
172 | } |
173 | |
174 | // Insert the architecture feature derived from the target triple into the |
175 | // feature string. This is important for setting features that are implied |
176 | // based on the architecture version. |
177 | std::string ArchFS = ARM_MC::ParseARMTriple(TT: TargetTriple, CPU: CPUString); |
178 | if (!FS.empty()) { |
179 | if (!ArchFS.empty()) |
180 | ArchFS = (Twine(ArchFS) + "," + FS).str(); |
181 | else |
182 | ArchFS = std::string(FS); |
183 | } |
184 | ParseSubtargetFeatures(CPU: CPUString, /*TuneCPU*/ CPUString, FS: ArchFS); |
185 | |
186 | // FIXME: This used enable V6T2 support implicitly for Thumb2 mode. |
187 | // Assert this for now to make the change obvious. |
188 | assert(hasV6T2Ops() || !hasThumb2()); |
189 | |
190 | if (genExecuteOnly()) { |
191 | // Execute only support for >= v8-M Baseline requires movt support |
192 | if (hasV8MBaselineOps()) |
193 | NoMovt = false; |
194 | if (!hasV6MOps()) |
195 | report_fatal_error(reason: "Cannot generate execute-only code for this target" ); |
196 | } |
197 | |
198 | // Keep a pointer to static instruction cost data for the specified CPU. |
199 | SchedModel = getSchedModelForCPU(CPUString); |
200 | |
201 | // Initialize scheduling itinerary for the specified CPU. |
202 | InstrItins = getInstrItineraryForCPU(CPUString); |
203 | |
204 | // FIXME: this is invalid for WindowsCE |
205 | if (isTargetWindows()) |
206 | NoARM = true; |
207 | |
208 | if (isAAPCS_ABI()) |
209 | stackAlignment = Align(8); |
210 | if (isTargetNaCl() || isAAPCS16_ABI()) |
211 | stackAlignment = Align(16); |
212 | |
213 | // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo:: |
214 | // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as |
215 | // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation |
216 | // support in the assembler and linker to be used. This would need to be |
217 | // fixed to fully support tail calls in Thumb1. |
218 | // |
219 | // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M |
220 | // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This |
221 | // means if we need to reload LR, it takes extra instructions, which outweighs |
222 | // the value of the tail call; but here we don't know yet whether LR is going |
223 | // to be used. We take the optimistic approach of generating the tail call and |
224 | // perhaps taking a hit if we need to restore the LR. |
225 | |
226 | // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, |
227 | // but we need to make sure there are enough registers; the only valid |
228 | // registers are the 4 used for parameters. We don't currently do this |
229 | // case. |
230 | |
231 | SupportsTailCall = !isThumb1Only() || hasV8MBaselineOps(); |
232 | |
233 | if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(Major: 5, Minor: 0)) |
234 | SupportsTailCall = false; |
235 | |
236 | switch (IT) { |
237 | case DefaultIT: |
238 | RestrictIT = false; |
239 | break; |
240 | case RestrictedIT: |
241 | RestrictIT = true; |
242 | break; |
243 | } |
244 | |
245 | // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default. |
246 | const FeatureBitset &Bits = getFeatureBits(); |
247 | if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters |
248 | (Options.UnsafeFPMath || isTargetDarwin())) |
249 | HasNEONForFP = true; |
250 | |
251 | if (isRWPI()) |
252 | ReserveR9 = true; |
253 | |
254 | // If MVEVectorCostFactor is still 0 (has not been set to anything else), default it to 2 |
255 | if (MVEVectorCostFactor == 0) |
256 | MVEVectorCostFactor = 2; |
257 | |
258 | // FIXME: Teach TableGen to deal with these instead of doing it manually here. |
259 | switch (ARMProcFamily) { |
260 | case Others: |
261 | case CortexA5: |
262 | break; |
263 | case CortexA7: |
264 | LdStMultipleTiming = DoubleIssue; |
265 | break; |
266 | case CortexA8: |
267 | LdStMultipleTiming = DoubleIssue; |
268 | break; |
269 | case CortexA9: |
270 | LdStMultipleTiming = DoubleIssueCheckUnalignedAccess; |
271 | PreISelOperandLatencyAdjustment = 1; |
272 | break; |
273 | case CortexA12: |
274 | break; |
275 | case CortexA15: |
276 | MaxInterleaveFactor = 2; |
277 | PreISelOperandLatencyAdjustment = 1; |
278 | PartialUpdateClearance = 12; |
279 | break; |
280 | case CortexA17: |
281 | case CortexA32: |
282 | case CortexA35: |
283 | case CortexA53: |
284 | case CortexA55: |
285 | case CortexA57: |
286 | case CortexA72: |
287 | case CortexA73: |
288 | case CortexA75: |
289 | case CortexA76: |
290 | case CortexA77: |
291 | case CortexA78: |
292 | case CortexA78AE: |
293 | case CortexA78C: |
294 | case CortexA710: |
295 | case CortexR4: |
296 | case CortexR5: |
297 | case CortexR7: |
298 | case CortexM3: |
299 | case CortexM7: |
300 | case CortexR52: |
301 | case CortexX1: |
302 | case CortexX1C: |
303 | break; |
304 | case Exynos: |
305 | LdStMultipleTiming = SingleIssuePlusExtras; |
306 | MaxInterleaveFactor = 4; |
307 | if (!isThumb()) |
308 | PrefLoopLogAlignment = 3; |
309 | break; |
310 | case Kryo: |
311 | break; |
312 | case Krait: |
313 | PreISelOperandLatencyAdjustment = 1; |
314 | break; |
315 | case NeoverseV1: |
316 | break; |
317 | case Swift: |
318 | MaxInterleaveFactor = 2; |
319 | LdStMultipleTiming = SingleIssuePlusExtras; |
320 | PreISelOperandLatencyAdjustment = 1; |
321 | PartialUpdateClearance = 12; |
322 | break; |
323 | } |
324 | } |
325 | |
// Delegate to the target machine, which owns the float-ABI decision.
bool ARMSubtarget::isTargetHardFloat() const { return TM.isTargetHardFloat(); }
327 | |
// ABI predicate; the target machine must have resolved TargetABI before this
// is called (asserted below).
bool ARMSubtarget::isAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
}
// AAPCS16 counts as AAPCS here; use isAAPCS16_ABI to distinguish them.
bool ARMSubtarget::isAAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS ||
         TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
// True only for the AAPCS16 variant (which also satisfies isAAPCS_ABI).
bool ARMSubtarget::isAAPCS16_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
341 | |
342 | bool ARMSubtarget::isROPI() const { |
343 | return TM.getRelocationModel() == Reloc::ROPI || |
344 | TM.getRelocationModel() == Reloc::ROPI_RWPI; |
345 | } |
346 | bool ARMSubtarget::isRWPI() const { |
347 | return TM.getRelocationModel() == Reloc::RWPI || |
348 | TM.getRelocationModel() == Reloc::ROPI_RWPI; |
349 | } |
350 | |
/// Return true if GV must be referenced through an indirection rather than
/// directly: either because it may not be DSO-local, or because of the MachO
/// PIC limitation described below.
bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
  if (!TM.shouldAssumeDSOLocal(GV))
    return true;

  // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
  // the section that is being relocated. This means we have to use a load even
  // for GVs that are known to be local to the dso.
  if (isTargetMachO() && TM.isPositionIndependent() &&
      (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
    return true;

  return false;
}
364 | |
365 | bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const { |
366 | return isTargetELF() && TM.isPositionIndependent() && !GV->isDSOLocal(); |
367 | } |
368 | |
/// Branch misprediction penalty (in cycles) from the CPU's scheduling model,
/// which was selected in initSubtargetFeatures via getSchedModelForCPU.
unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}
372 | |
373 | bool ARMSubtarget::enableMachineScheduler() const { |
374 | // The MachineScheduler can increase register usage, so we use more high |
375 | // registers and end up with more T2 instructions that cannot be converted to |
376 | // T1 instructions. At least until we do better at converting to thumb1 |
377 | // instructions, on cortex-m at Oz where we are size-paranoid, don't use the |
378 | // Machine scheduler, relying on the DAG register pressure scheduler instead. |
379 | if (isMClass() && hasMinSize()) |
380 | return false; |
381 | // Enable the MachineScheduler before register allocation for subtargets |
382 | // with the use-misched feature. |
383 | return useMachineScheduler(); |
384 | } |
385 | |
386 | bool ARMSubtarget::enableSubRegLiveness() const { |
387 | if (EnableSubRegLiveness.getNumOccurrences()) |
388 | return EnableSubRegLiveness; |
389 | // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs |
390 | // and q subregs for qqqqpr regs. |
391 | return hasMVEIntegerOps(); |
392 | } |
393 | |
bool ARMSubtarget::enableMachinePipeliner() const {
  // Enable the MachinePipeliner before register allocation for subtargets
  // with the use-mipipeliner feature, but only when the per-instruction
  // scheduling model the pipeliner relies on is available.
  return getSchedModel().hasInstrSchedModel() && useMachinePipeliner();
}
399 | |
// Software pipelining uses the InstrSchedModel (see enableMachinePipeliner)
// rather than a DFA-based resource model on this target.
bool ARMSubtarget::useDFAforSMS() const { return false; }
401 | |
402 | // This overrides the PostRAScheduler bit in the SchedModel for any CPU. |
403 | bool ARMSubtarget::enablePostRAScheduler() const { |
404 | if (enableMachineScheduler()) |
405 | return false; |
406 | if (disablePostRAScheduler()) |
407 | return false; |
408 | // Thumb1 cores will generally not benefit from post-ra scheduling |
409 | return !isThumb1Only(); |
410 | } |
411 | |
412 | bool ARMSubtarget::enablePostRAMachineScheduler() const { |
413 | if (!enableMachineScheduler()) |
414 | return false; |
415 | if (disablePostRAScheduler()) |
416 | return false; |
417 | return !isThumb1Only(); |
418 | } |
419 | |
420 | bool ARMSubtarget::useStride4VFPs() const { |
421 | // For general targets, the prologue can grow when VFPs are allocated with |
422 | // stride 4 (more vpush instructions). But WatchOS uses a compact unwind |
423 | // format which it's more important to get right. |
424 | return isTargetWatchABI() || |
425 | (useWideStrideVFP() && !OptMinSize); |
426 | } |
427 | |
428 | bool ARMSubtarget::useMovt() const { |
429 | // NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit |
430 | // immediates as it is inherently position independent, and may be out of |
431 | // range otherwise. |
432 | return !NoMovt && hasV8MBaselineOps() && |
433 | (isTargetWindows() || !OptMinSize || genExecuteOnly()); |
434 | } |
435 | |
436 | bool ARMSubtarget::useFastISel() const { |
437 | // Enable fast-isel for any target, for testing only. |
438 | if (ForceFastISel) |
439 | return true; |
440 | |
441 | // Limit fast-isel to the targets that are or have been tested. |
442 | if (!hasV6Ops()) |
443 | return false; |
444 | |
445 | // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. |
446 | return TM.Options.EnableFastISel && |
447 | ((isTargetMachO() && !isThumb1Only()) || |
448 | (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb())); |
449 | } |
450 | |
/// Select which of the GPR register class's allocation orders (defined via
/// tablegen) to use for this function.
unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
  // The GPR register class has multiple possible allocation orders, with
  // tradeoffs preferred by different sub-architectures and optimisation goals.
  // The allocation orders are:
  // 0: (the default tablegen order, not used)
  // 1: r14, r0-r13
  // 2: r0-r7
  // 3: r0-r7, r12, lr, r8-r11
  // Note that the register allocator will change this order so that
  // callee-saved registers are used later, as they require extra work in the
  // prologue/epilogue (though we sometimes override that).

  // For thumb1-only targets, only the low registers are allocatable.
  if (isThumb1Only())
    return 2;

  // Allocate low registers first, so we can select more 16-bit instructions.
  // We also (in ignoreCSRForAllocationOrder) override the default behaviour
  // with regards to callee-saved registers, because pushing extra registers is
  // much cheaper (in terms of code size) than using high registers. After
  // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
  // can return with the pop, don't need an extra "bx lr") and then the rest of
  // the high registers.
  if (isThumb2() && MF.getFunction().hasMinSize())
    return 3;

  // Otherwise, allocate in the default order, using LR first because saving it
  // allows a shorter epilogue sequence.
  return 1;
}
481 | |
/// Return true if the callee-saved cost of PhysReg should be ignored when
/// ordering allocation candidates for MF (companion to
/// getGPRAllocationOrder's min-size order).
bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                               unsigned PhysReg) const {
  // To minimize code size in Thumb2, we prefer the usage of low regs (lower
  // cost per use) so we can use narrow encoding. By default, caller-saved
  // registers (e.g. lr, r12) are always allocated first, regardless of
  // their cost per use. When optForMinSize, we prefer the low regs even if
  // they are CSR because usually push/pop can be folded into existing ones.
  return isThumb2() && MF.getFunction().hasMinSize() &&
         ARM::GPRRegClass.contains(PhysReg);
}
492 | |
493 | bool ARMSubtarget::splitFramePointerPush(const MachineFunction &MF) const { |
494 | const Function &F = MF.getFunction(); |
495 | if (!MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || |
496 | !F.needsUnwindTableEntry()) |
497 | return false; |
498 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
499 | return MFI.hasVarSizedObjects() || getRegisterInfo()->hasStackRealignment(MF); |
500 | } |
501 | |