1//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements AArch64 TargetInfo objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64.h"
14#include "clang/Basic/LangOptions.h"
15#include "clang/Basic/TargetBuiltins.h"
16#include "clang/Basic/TargetInfo.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/StringExtras.h"
19#include "llvm/ADT/StringSwitch.h"
20#include "llvm/TargetParser/AArch64TargetParser.h"
21#include "llvm/TargetParser/ARMTargetParserCommon.h"
22#include <optional>
23
24using namespace clang;
25using namespace clang::targets;
26
27static constexpr Builtin::Info BuiltinInfo[] = {
28#define BUILTIN(ID, TYPE, ATTRS) \
29 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
30#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
31 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
32#include "clang/Basic/BuiltinsNEON.def"
33
34#define BUILTIN(ID, TYPE, ATTRS) \
35 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
36#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
37 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
38#include "clang/Basic/BuiltinsSVE.def"
39
40#define BUILTIN(ID, TYPE, ATTRS) \
41 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
42#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
43 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
44#include "clang/Basic/BuiltinsSME.def"
45
46#define BUILTIN(ID, TYPE, ATTRS) \
47 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
48#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
49 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
50#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
51 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
52#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
53 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
54#include "clang/Basic/BuiltinsAArch64.def"
55};
56
57void AArch64TargetInfo::setArchFeatures() {
58 if (*ArchInfo == llvm::AArch64::ARMV8R) {
59 HasDotProd = true;
60 HasDIT = true;
61 HasFlagM = true;
62 HasRCPC = true;
63 FPU |= NeonMode;
64 HasCCPP = true;
65 HasCRC = true;
66 HasLSE = true;
67 HasRDM = true;
68 } else if (ArchInfo->Version.getMajor() == 8) {
69 if (ArchInfo->Version.getMinor() >= 7u) {
70 HasWFxT = true;
71 }
72 if (ArchInfo->Version.getMinor() >= 6u) {
73 HasBFloat16 = true;
74 HasMatMul = true;
75 }
76 if (ArchInfo->Version.getMinor() >= 5u) {
77 HasAlternativeNZCV = true;
78 HasFRInt3264 = true;
79 HasSSBS = true;
80 HasSB = true;
81 HasPredRes = true;
82 HasBTI = true;
83 }
84 if (ArchInfo->Version.getMinor() >= 4u) {
85 HasDotProd = true;
86 HasDIT = true;
87 HasFlagM = true;
88 }
89 if (ArchInfo->Version.getMinor() >= 3u) {
90 HasRCPC = true;
91 FPU |= NeonMode;
92 }
93 if (ArchInfo->Version.getMinor() >= 2u) {
94 HasCCPP = true;
95 }
96 if (ArchInfo->Version.getMinor() >= 1u) {
97 HasCRC = true;
98 HasLSE = true;
99 HasRDM = true;
100 }
101 } else if (ArchInfo->Version.getMajor() == 9) {
102 if (ArchInfo->Version.getMinor() >= 2u) {
103 HasWFxT = true;
104 }
105 if (ArchInfo->Version.getMinor() >= 1u) {
106 HasBFloat16 = true;
107 HasMatMul = true;
108 }
109 FPU |= SveMode;
110 HasSVE2 = true;
111 HasFullFP16 = true;
112 HasAlternativeNZCV = true;
113 HasFRInt3264 = true;
114 HasSSBS = true;
115 HasSB = true;
116 HasPredRes = true;
117 HasBTI = true;
118 HasDotProd = true;
119 HasDIT = true;
120 HasFlagM = true;
121 HasRCPC = true;
122 FPU |= NeonMode;
123 HasCCPP = true;
124 HasCRC = true;
125 HasLSE = true;
126 HasRDM = true;
127 }
128}
129
130AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
131 const TargetOptions &Opts)
132 : TargetInfo(Triple), ABI("aapcs") {
133 if (getTriple().isOSOpenBSD()) {
134 Int64Type = SignedLongLong;
135 IntMaxType = SignedLongLong;
136 } else {
137 if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
138 WCharType = UnsignedInt;
139
140 Int64Type = SignedLong;
141 IntMaxType = SignedLong;
142 }
143
144 // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
145 HasLegalHalfType = true;
146 HalfArgsAndReturns = true;
147 HasFloat16 = true;
148 HasStrictFP = true;
149
150 if (Triple.isArch64Bit())
151 LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
152 else
153 LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
154
155 MaxVectorAlign = 128;
156 MaxAtomicInlineWidth = 128;
157 MaxAtomicPromoteWidth = 128;
158
159 LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
160 LongDoubleFormat = &llvm::APFloat::IEEEquad();
161
162 BFloat16Width = BFloat16Align = 16;
163 BFloat16Format = &llvm::APFloat::BFloat();
164
165 // Make __builtin_ms_va_list available.
166 HasBuiltinMSVaList = true;
167
168 // Make the SVE types available. Note that this deliberately doesn't
169 // depend on SveMode, since in principle it should be possible to turn
170 // SVE on and off within a translation unit. It should also be possible
171 // to compile the global declaration:
172 //
173 // __SVInt8_t *ptr;
174 //
175 // even without SVE.
176 HasAArch64SVETypes = true;
177
178 // {} in inline assembly are neon specifiers, not assembly variant
179 // specifiers.
180 NoAsmVariants = true;
181
182 // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
183 // contributes to the alignment of the containing aggregate in the same way
184 // a plain (non bit-field) member of that type would, without exception for
185 // zero-sized or anonymous bit-fields."
186 assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
187 UseZeroLengthBitfieldAlignment = true;
188
189 // AArch64 targets default to using the ARM C++ ABI.
190 TheCXXABI.set(TargetCXXABI::GenericAArch64);
191
192 if (Triple.getOS() == llvm::Triple::Linux)
193 this->MCountName = "\01_mcount";
194 else if (Triple.getOS() == llvm::Triple::UnknownOS)
195 this->MCountName =
196 Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
197}
198
199StringRef AArch64TargetInfo::getABI() const { return ABI; }
200
201bool AArch64TargetInfo::setABI(const std::string &Name) {
202 if (Name != "aapcs" && Name != "darwinpcs")
203 return false;
204
205 ABI = Name;
206 return true;
207}
208
209bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210 BranchProtectionInfo &BPI,
211 StringRef &Err) const {
212 llvm::ARM::ParsedBranchProtection PBP;
213 if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214 return false;
215
216 BPI.SignReturnAddr =
217 llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218 .Case(S: "non-leaf", Value: LangOptions::SignReturnAddressScopeKind::NonLeaf)
219 .Case(S: "all", Value: LangOptions::SignReturnAddressScopeKind::All)
220 .Default(Value: LangOptions::SignReturnAddressScopeKind::None);
221
222 if (PBP.Key == "a_key")
223 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224 else
225 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226
227 BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228 BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
229 BPI.GuardedControlStack = PBP.GuardedControlStack;
230 return true;
231}
232
233bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
234 return Name == "generic" || llvm::AArch64::parseCpu(Name);
235}
236
237bool AArch64TargetInfo::setCPU(const std::string &Name) {
238 return isValidCPUName(Name);
239}
240
241void AArch64TargetInfo::fillValidCPUList(
242 SmallVectorImpl<StringRef> &Values) const {
243 llvm::AArch64::fillValidCPUArchList(Values);
244}
245
246void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
247 MacroBuilder &Builder) const {
248 Builder.defineMacro(Name: "__ARM_FEATURE_QRDMX", Value: "1");
249}
250
251void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
252 MacroBuilder &Builder) const {
253 // Also include the ARMv8.1 defines
254 getTargetDefinesARMV81A(Opts, Builder);
255}
256
257void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
258 MacroBuilder &Builder) const {
259 Builder.defineMacro(Name: "__ARM_FEATURE_COMPLEX", Value: "1");
260 Builder.defineMacro(Name: "__ARM_FEATURE_JCVT", Value: "1");
261 // Also include the Armv8.2 defines
262 getTargetDefinesARMV82A(Opts, Builder);
263}
264
265void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
266 MacroBuilder &Builder) const {
267 // Also include the Armv8.3 defines
268 getTargetDefinesARMV83A(Opts, Builder);
269}
270
271void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
272 MacroBuilder &Builder) const {
273 Builder.defineMacro(Name: "__ARM_FEATURE_FRINT", Value: "1");
274 Builder.defineMacro(Name: "__ARM_FEATURE_BTI", Value: "1");
275 // Also include the Armv8.4 defines
276 getTargetDefinesARMV84A(Opts, Builder);
277}
278
279void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
280 MacroBuilder &Builder) const {
281 // Also include the Armv8.5 defines
282 // FIXME: Armv8.6 makes the following extensions mandatory:
283 // - __ARM_FEATURE_BF16
284 // - __ARM_FEATURE_MATMUL_INT8
285 // Handle them here.
286 getTargetDefinesARMV85A(Opts, Builder);
287}
288
289void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
290 MacroBuilder &Builder) const {
291 // Also include the Armv8.6 defines
292 getTargetDefinesARMV86A(Opts, Builder);
293}
294
295void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
296 MacroBuilder &Builder) const {
297 // Also include the Armv8.7 defines
298 getTargetDefinesARMV87A(Opts, Builder);
299}
300
301void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
302 MacroBuilder &Builder) const {
303 // Also include the Armv8.8 defines
304 getTargetDefinesARMV88A(Opts, Builder);
305}
306
307void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
308 MacroBuilder &Builder) const {
309 // Armv9-A maps to Armv8.5-A
310 getTargetDefinesARMV85A(Opts, Builder);
311}
312
313void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
314 MacroBuilder &Builder) const {
315 // Armv9.1-A maps to Armv8.6-A
316 getTargetDefinesARMV86A(Opts, Builder);
317}
318
319void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
320 MacroBuilder &Builder) const {
321 // Armv9.2-A maps to Armv8.7-A
322 getTargetDefinesARMV87A(Opts, Builder);
323}
324
325void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
326 MacroBuilder &Builder) const {
327 // Armv9.3-A maps to Armv8.8-A
328 getTargetDefinesARMV88A(Opts, Builder);
329}
330
331void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
332 MacroBuilder &Builder) const {
333 // Armv9.4-A maps to Armv8.9-A
334 getTargetDefinesARMV89A(Opts, Builder);
335}
336
337void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
338 MacroBuilder &Builder) const {
339 // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
340 getTargetDefinesARMV94A(Opts, Builder);
341}
342
343void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
344 MacroBuilder &Builder) const {
345 // Target identification.
346 if (getTriple().isWindowsArm64EC()) {
347 // Define the same set of macros as would be defined on x86_64 to ensure that
348 // ARM64EC datatype layouts match those of x86_64 compiled code
349 Builder.defineMacro(Name: "__amd64__");
350 Builder.defineMacro(Name: "__amd64");
351 Builder.defineMacro(Name: "__x86_64");
352 Builder.defineMacro(Name: "__x86_64__");
353 Builder.defineMacro(Name: "__arm64ec__");
354 } else {
355 Builder.defineMacro(Name: "__aarch64__");
356 }
357
358 // Inline assembly supports AArch64 flag outputs.
359 Builder.defineMacro(Name: "__GCC_ASM_FLAG_OUTPUTS__");
360
361 std::string CodeModel = getTargetOpts().CodeModel;
362 if (CodeModel == "default")
363 CodeModel = "small";
364 for (char &c : CodeModel)
365 c = toupper(c: c);
366 Builder.defineMacro(Name: "__AARCH64_CMODEL_" + CodeModel + "__");
367
368 // ACLE predefines. Many can only have one possible value on v8 AArch64.
369 Builder.defineMacro(Name: "__ARM_ACLE", Value: "200");
370
371 // __ARM_ARCH is defined as an integer value indicating the current ARM ISA.
372 // For ISAs up to and including v8, __ARM_ARCH is equal to the major version
373 // number. For ISAs from v8.1 onwards, __ARM_ARCH is scaled up to include the
374 // minor version number, e.g. for ARM architecture ARMvX.Y:
375 // __ARM_ARCH = X * 100 + Y.
376 if (ArchInfo->Version.getMajor() == 8 && ArchInfo->Version.getMinor() == 0)
377 Builder.defineMacro(Name: "__ARM_ARCH",
378 Value: std::to_string(val: ArchInfo->Version.getMajor()));
379 else
380 Builder.defineMacro(Name: "__ARM_ARCH",
381 Value: std::to_string(val: ArchInfo->Version.getMajor() * 100 +
382 ArchInfo->Version.getMinor().value()));
383
384 Builder.defineMacro(Name: "__ARM_ARCH_PROFILE",
385 Value: std::string("'") + (char)ArchInfo->Profile + "'");
386
387 Builder.defineMacro(Name: "__ARM_64BIT_STATE", Value: "1");
388 Builder.defineMacro(Name: "__ARM_PCS_AAPCS64", Value: "1");
389 Builder.defineMacro(Name: "__ARM_ARCH_ISA_A64", Value: "1");
390
391 Builder.defineMacro(Name: "__ARM_FEATURE_CLZ", Value: "1");
392 Builder.defineMacro(Name: "__ARM_FEATURE_FMA", Value: "1");
393 Builder.defineMacro(Name: "__ARM_FEATURE_LDREX", Value: "0xF");
394 Builder.defineMacro(Name: "__ARM_FEATURE_IDIV", Value: "1"); // As specified in ACLE
395 Builder.defineMacro(Name: "__ARM_FEATURE_DIV"); // For backwards compatibility
396 Builder.defineMacro(Name: "__ARM_FEATURE_NUMERIC_MAXMIN", Value: "1");
397 Builder.defineMacro(Name: "__ARM_FEATURE_DIRECTED_ROUNDING", Value: "1");
398
399 Builder.defineMacro(Name: "__ARM_ALIGN_MAX_STACK_PWR", Value: "4");
400
401 // These macros are set when Clang can parse declarations with these
402 // attributes.
403 Builder.defineMacro(Name: "__ARM_STATE_ZA", Value: "1");
404 Builder.defineMacro(Name: "__ARM_STATE_ZT0", Value: "1");
405
406 // 0xe implies support for half, single and double precision operations.
407 if (FPU & FPUMode)
408 Builder.defineMacro(Name: "__ARM_FP", Value: "0xE");
409
410 // PCS specifies this for SysV variants, which is all we support. Other ABIs
411 // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
412 Builder.defineMacro(Name: "__ARM_FP16_FORMAT_IEEE", Value: "1");
413 Builder.defineMacro(Name: "__ARM_FP16_ARGS", Value: "1");
414
415 if (Opts.UnsafeFPMath)
416 Builder.defineMacro(Name: "__ARM_FP_FAST", Value: "1");
417
418 Builder.defineMacro(Name: "__ARM_SIZEOF_WCHAR_T",
419 Value: Twine(Opts.WCharSize ? Opts.WCharSize : 4));
420
421 Builder.defineMacro(Name: "__ARM_SIZEOF_MINIMAL_ENUM", Value: Opts.ShortEnums ? "1" : "4");
422
423 if (FPU & NeonMode) {
424 Builder.defineMacro(Name: "__ARM_NEON", Value: "1");
425 // 64-bit NEON supports half, single and double precision operations.
426 Builder.defineMacro(Name: "__ARM_NEON_FP", Value: "0xE");
427 }
428
429 if (FPU & SveMode)
430 Builder.defineMacro(Name: "__ARM_FEATURE_SVE", Value: "1");
431
432 if ((FPU & NeonMode) && (FPU & SveMode))
433 Builder.defineMacro(Name: "__ARM_NEON_SVE_BRIDGE", Value: "1");
434
435 if (HasSVE2)
436 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2", Value: "1");
437
438 if (HasSVE2 && HasSVE2AES)
439 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_AES", Value: "1");
440
441 if (HasSVE2 && HasSVE2BitPerm)
442 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_BITPERM", Value: "1");
443
444 if (HasSVE2 && HasSVE2SHA3)
445 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SHA3", Value: "1");
446
447 if (HasSVE2 && HasSVE2SM4)
448 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SM4", Value: "1");
449
450 if (HasSME) {
451 Builder.defineMacro(Name: "__ARM_FEATURE_SME");
452 Builder.defineMacro(Name: "__ARM_FEATURE_LOCALLY_STREAMING", Value: "1");
453 }
454
455 if (HasSME2) {
456 Builder.defineMacro(Name: "__ARM_FEATURE_SME");
457 Builder.defineMacro(Name: "__ARM_FEATURE_SME2");
458 Builder.defineMacro(Name: "__ARM_FEATURE_LOCALLY_STREAMING", Value: "1");
459 }
460
461 if (HasCRC)
462 Builder.defineMacro(Name: "__ARM_FEATURE_CRC32", Value: "1");
463
464 if (HasRCPC3)
465 Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "3");
466 else if (HasRCPC)
467 Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "1");
468
469 if (HasFMV)
470 Builder.defineMacro(Name: "__HAVE_FUNCTION_MULTI_VERSIONING", Value: "1");
471
472 // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
473 // macros for AES, SHA2, SHA3 and SM4
474 if (HasAES && HasSHA2)
475 Builder.defineMacro(Name: "__ARM_FEATURE_CRYPTO", Value: "1");
476
477 if (HasAES)
478 Builder.defineMacro(Name: "__ARM_FEATURE_AES", Value: "1");
479
480 if (HasSHA2)
481 Builder.defineMacro(Name: "__ARM_FEATURE_SHA2", Value: "1");
482
483 if (HasSHA3) {
484 Builder.defineMacro(Name: "__ARM_FEATURE_SHA3", Value: "1");
485 Builder.defineMacro(Name: "__ARM_FEATURE_SHA512", Value: "1");
486 }
487
488 if (HasSM4) {
489 Builder.defineMacro(Name: "__ARM_FEATURE_SM3", Value: "1");
490 Builder.defineMacro(Name: "__ARM_FEATURE_SM4", Value: "1");
491 }
492
493 if (HasPAuth)
494 Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH", Value: "1");
495
496 if (HasPAuthLR)
497 Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH_LR", Value: "1");
498
499 if (HasUnaligned)
500 Builder.defineMacro(Name: "__ARM_FEATURE_UNALIGNED", Value: "1");
501
502 if ((FPU & NeonMode) && HasFullFP16)
503 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", Value: "1");
504 if (HasFullFP16)
505 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", Value: "1");
506
507 if (HasDotProd)
508 Builder.defineMacro(Name: "__ARM_FEATURE_DOTPROD", Value: "1");
509
510 if (HasMTE)
511 Builder.defineMacro(Name: "__ARM_FEATURE_MEMORY_TAGGING", Value: "1");
512
513 if (HasTME)
514 Builder.defineMacro(Name: "__ARM_FEATURE_TME", Value: "1");
515
516 if (HasMatMul)
517 Builder.defineMacro(Name: "__ARM_FEATURE_MATMUL_INT8", Value: "1");
518
519 if (HasLSE)
520 Builder.defineMacro(Name: "__ARM_FEATURE_ATOMICS", Value: "1");
521
522 if (HasBFloat16) {
523 Builder.defineMacro(Name: "__ARM_FEATURE_BF16", Value: "1");
524 Builder.defineMacro(Name: "__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", Value: "1");
525 Builder.defineMacro(Name: "__ARM_BF16_FORMAT_ALTERNATIVE", Value: "1");
526 Builder.defineMacro(Name: "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", Value: "1");
527 }
528
529 if ((FPU & SveMode) && HasBFloat16) {
530 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BF16", Value: "1");
531 }
532
533 if ((FPU & SveMode) && HasMatmulFP64)
534 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP64", Value: "1");
535
536 if ((FPU & SveMode) && HasMatmulFP32)
537 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP32", Value: "1");
538
539 if ((FPU & SveMode) && HasMatMul)
540 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_INT8", Value: "1");
541
542 if ((FPU & NeonMode) && HasFP16FML)
543 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_FML", Value: "1");
544
545 if (Opts.hasSignReturnAddress()) {
546 // Bitmask:
547 // 0: Protection using the A key
548 // 1: Protection using the B key
549 // 2: Protection including leaf functions
550 // 3: Protection using PC as a diversifier
551 unsigned Value = 0;
552
553 if (Opts.isSignReturnAddressWithAKey())
554 Value |= (1 << 0);
555 else
556 Value |= (1 << 1);
557
558 if (Opts.isSignReturnAddressScopeAll())
559 Value |= (1 << 2);
560
561 if (Opts.BranchProtectionPAuthLR)
562 Value |= (1 << 3);
563
564 Builder.defineMacro(Name: "__ARM_FEATURE_PAC_DEFAULT", Value: std::to_string(val: Value));
565 }
566
567 if (Opts.BranchTargetEnforcement)
568 Builder.defineMacro(Name: "__ARM_FEATURE_BTI_DEFAULT", Value: "1");
569
570 if (Opts.GuardedControlStack)
571 Builder.defineMacro(Name: "__ARM_FEATURE_GCS_DEFAULT", Value: "1");
572
573 if (HasLS64)
574 Builder.defineMacro(Name: "__ARM_FEATURE_LS64", Value: "1");
575
576 if (HasRandGen)
577 Builder.defineMacro(Name: "__ARM_FEATURE_RNG", Value: "1");
578
579 if (HasMOPS)
580 Builder.defineMacro(Name: "__ARM_FEATURE_MOPS", Value: "1");
581
582 if (HasD128)
583 Builder.defineMacro(Name: "__ARM_FEATURE_SYSREG128", Value: "1");
584
585 if (HasGCS)
586 Builder.defineMacro(Name: "__ARM_FEATURE_GCS", Value: "1");
587
588 if (*ArchInfo == llvm::AArch64::ARMV8_1A)
589 getTargetDefinesARMV81A(Opts, Builder);
590 else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
591 getTargetDefinesARMV82A(Opts, Builder);
592 else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
593 getTargetDefinesARMV83A(Opts, Builder);
594 else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
595 getTargetDefinesARMV84A(Opts, Builder);
596 else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
597 getTargetDefinesARMV85A(Opts, Builder);
598 else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
599 getTargetDefinesARMV86A(Opts, Builder);
600 else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
601 getTargetDefinesARMV87A(Opts, Builder);
602 else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
603 getTargetDefinesARMV88A(Opts, Builder);
604 else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
605 getTargetDefinesARMV89A(Opts, Builder);
606 else if (*ArchInfo == llvm::AArch64::ARMV9A)
607 getTargetDefinesARMV9A(Opts, Builder);
608 else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
609 getTargetDefinesARMV91A(Opts, Builder);
610 else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
611 getTargetDefinesARMV92A(Opts, Builder);
612 else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
613 getTargetDefinesARMV93A(Opts, Builder);
614 else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
615 getTargetDefinesARMV94A(Opts, Builder);
616 else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
617 getTargetDefinesARMV95A(Opts, Builder);
618
619 // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
620 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
621 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
622 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
623 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
624 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
625
626 // Allow detection of fast FMA support.
627 Builder.defineMacro(Name: "__FP_FAST_FMA", Value: "1");
628 Builder.defineMacro(Name: "__FP_FAST_FMAF", Value: "1");
629
630 // C/C++ operators work on both VLS and VLA SVE types
631 if (FPU & SveMode)
632 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_VECTOR_OPERATORS", Value: "2");
633
634 if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
635 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BITS", Value: Twine(Opts.VScaleMin * 128));
636 }
637}
638
639ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
640 return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
641 Builtin::FirstTSBuiltin);
642}
643
644std::optional<std::pair<unsigned, unsigned>>
645AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
646 if (LangOpts.VScaleMin || LangOpts.VScaleMax)
647 return std::pair<unsigned, unsigned>(
648 LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
649
650 if (hasFeature(Feature: "sve"))
651 return std::pair<unsigned, unsigned>(1, 16);
652
653 return std::nullopt;
654}
655
656unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
657 if (Name == "default")
658 return 0;
659 if (auto Ext = llvm::AArch64::parseArchExtension(Extension: Name))
660 return Ext->FmvPriority;
661 return 0;
662}
663
664unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
665 // Take the maximum priority as per feature cost, so more features win.
666 return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
667}
668
669bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
670 if (auto Ext = llvm::AArch64::parseArchExtension(Extension: Name))
671 return !Ext->DependentFeatures.empty();
672 return false;
673}
674
675StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
676 if (auto Ext = llvm::AArch64::parseArchExtension(Extension: Name))
677 return Ext->DependentFeatures;
678 return StringRef();
679}
680
681bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
682 return llvm::AArch64::parseArchExtension(Extension: FeatureStr).has_value();
683}
684
685bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
686 return llvm::StringSwitch<bool>(Feature)
687 .Cases(S0: "aarch64", S1: "arm64", S2: "arm", Value: true)
688 .Case(S: "fmv", Value: HasFMV)
689 .Cases(S0: "neon", S1: "fp", S2: "simd", Value: FPU & NeonMode)
690 .Case(S: "jscvt", Value: HasJSCVT)
691 .Case(S: "fcma", Value: HasFCMA)
692 .Case(S: "rng", Value: HasRandGen)
693 .Case(S: "flagm", Value: HasFlagM)
694 .Case(S: "flagm2", Value: HasAlternativeNZCV)
695 .Case(S: "fp16fml", Value: HasFP16FML)
696 .Case(S: "dotprod", Value: HasDotProd)
697 .Case(S: "sm4", Value: HasSM4)
698 .Case(S: "rdm", Value: HasRDM)
699 .Case(S: "lse", Value: HasLSE)
700 .Case(S: "crc", Value: HasCRC)
701 .Case(S: "sha2", Value: HasSHA2)
702 .Case(S: "sha3", Value: HasSHA3)
703 .Cases(S0: "aes", S1: "pmull", Value: HasAES)
704 .Cases(S0: "fp16", S1: "fullfp16", Value: HasFullFP16)
705 .Case(S: "dit", Value: HasDIT)
706 .Case(S: "dpb", Value: HasCCPP)
707 .Case(S: "dpb2", Value: HasCCDP)
708 .Case(S: "rcpc", Value: HasRCPC)
709 .Case(S: "frintts", Value: HasFRInt3264)
710 .Case(S: "i8mm", Value: HasMatMul)
711 .Case(S: "bf16", Value: HasBFloat16)
712 .Case(S: "sve", Value: FPU & SveMode)
713 .Case(S: "sve-bf16", Value: FPU & SveMode && HasBFloat16)
714 .Case(S: "sve-i8mm", Value: FPU & SveMode && HasMatMul)
715 .Case(S: "f32mm", Value: FPU & SveMode && HasMatmulFP32)
716 .Case(S: "f64mm", Value: FPU & SveMode && HasMatmulFP64)
717 .Case(S: "sve2", Value: FPU & SveMode && HasSVE2)
718 .Case(S: "sve2-pmull128", Value: FPU & SveMode && HasSVE2AES)
719 .Case(S: "sve2-bitperm", Value: FPU & SveMode && HasSVE2BitPerm)
720 .Case(S: "sve2-sha3", Value: FPU & SveMode && HasSVE2SHA3)
721 .Case(S: "sve2-sm4", Value: FPU & SveMode && HasSVE2SM4)
722 .Case(S: "sme", Value: HasSME)
723 .Case(S: "sme2", Value: HasSME2)
724 .Case(S: "sme-f64f64", Value: HasSMEF64F64)
725 .Case(S: "sme-i16i64", Value: HasSMEI16I64)
726 .Case(S: "sme-fa64", Value: HasSMEFA64)
727 .Cases(S0: "memtag", S1: "memtag2", Value: HasMTE)
728 .Case(S: "sb", Value: HasSB)
729 .Case(S: "predres", Value: HasPredRes)
730 .Cases(S0: "ssbs", S1: "ssbs2", Value: HasSSBS)
731 .Case(S: "bti", Value: HasBTI)
732 .Cases(S0: "ls64", S1: "ls64_v", S2: "ls64_accdata", Value: HasLS64)
733 .Case(S: "wfxt", Value: HasWFxT)
734 .Case(S: "rcpc3", Value: HasRCPC3)
735 .Default(Value: false);
736}
737
738void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
739 StringRef Name, bool Enabled) const {
740 Features[Name] = Enabled;
741 // If the feature is an architecture feature (like v8.2a), add all previous
742 // architecture versions and any dependant target features.
743 const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
744 llvm::AArch64::ArchInfo::findBySubArch(SubArch: Name);
745
746 if (!ArchInfo)
747 return; // Not an architecture, nothing more to do.
748
749 // Disabling an architecture feature does not affect dependent features
750 if (!Enabled)
751 return;
752
753 for (const auto *OtherArch : llvm::AArch64::ArchInfos)
754 if (ArchInfo->implies(Other: *OtherArch))
755 Features[OtherArch->getSubArch()] = true;
756
757 // Set any features implied by the architecture
758 std::vector<StringRef> CPUFeats;
759 if (llvm::AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: CPUFeats)) {
760 for (auto F : CPUFeats) {
761 assert(F[0] == '+' && "Expected + in target feature!");
762 Features[F.drop_front(N: 1)] = true;
763 }
764 }
765}
766
767bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
768 DiagnosticsEngine &Diags) {
769 for (const auto &Feature : Features) {
770 if (Feature == "-fp-armv8")
771 HasNoFP = true;
772 if (Feature == "-neon")
773 HasNoNeon = true;
774 if (Feature == "-sve")
775 HasNoSVE = true;
776
777 if (Feature == "+neon" || Feature == "+fp-armv8")
778 FPU |= NeonMode;
779 if (Feature == "+jscvt") {
780 HasJSCVT = true;
781 FPU |= NeonMode;
782 }
783 if (Feature == "+fcma") {
784 HasFCMA = true;
785 FPU |= NeonMode;
786 }
787
788 if (Feature == "+sve") {
789 FPU |= NeonMode;
790 FPU |= SveMode;
791 HasFullFP16 = true;
792 }
793 if (Feature == "+sve2") {
794 FPU |= NeonMode;
795 FPU |= SveMode;
796 HasFullFP16 = true;
797 HasSVE2 = true;
798 }
799 if (Feature == "+sve2-aes") {
800 FPU |= NeonMode;
801 FPU |= SveMode;
802 HasFullFP16 = true;
803 HasSVE2 = true;
804 HasSVE2AES = true;
805 }
806 if (Feature == "+sve2-sha3") {
807 FPU |= NeonMode;
808 FPU |= SveMode;
809 HasFullFP16 = true;
810 HasSVE2 = true;
811 HasSVE2SHA3 = true;
812 }
813 if (Feature == "+sve2-sm4") {
814 FPU |= NeonMode;
815 FPU |= SveMode;
816 HasFullFP16 = true;
817 HasSVE2 = true;
818 HasSVE2SM4 = true;
819 }
820 if (Feature == "+sve2-bitperm") {
821 FPU |= NeonMode;
822 FPU |= SveMode;
823 HasFullFP16 = true;
824 HasSVE2 = true;
825 HasSVE2BitPerm = true;
826 }
827 if (Feature == "+f32mm") {
828 FPU |= NeonMode;
829 FPU |= SveMode;
830 HasFullFP16 = true;
831 HasMatmulFP32 = true;
832 }
833 if (Feature == "+f64mm") {
834 FPU |= NeonMode;
835 FPU |= SveMode;
836 HasFullFP16 = true;
837 HasMatmulFP64 = true;
838 }
839 if (Feature == "+sme") {
840 HasSME = true;
841 HasBFloat16 = true;
842 HasFullFP16 = true;
843 }
844 if (Feature == "+sme2") {
845 HasSME = true;
846 HasSME2 = true;
847 HasBFloat16 = true;
848 HasFullFP16 = true;
849 }
850 if (Feature == "+sme-f64f64") {
851 HasSME = true;
852 HasSMEF64F64 = true;
853 HasBFloat16 = true;
854 HasFullFP16 = true;
855 }
856 if (Feature == "+sme-i16i64") {
857 HasSME = true;
858 HasSMEI16I64 = true;
859 HasBFloat16 = true;
860 HasFullFP16 = true;
861 }
862 if (Feature == "+sme-fa64") {
863 FPU |= NeonMode;
864 FPU |= SveMode;
865 HasSME = true;
866 HasSVE2 = true;
867 HasSMEFA64 = true;
868 }
869 if (Feature == "+sb")
870 HasSB = true;
871 if (Feature == "+predres")
872 HasPredRes = true;
873 if (Feature == "+ssbs")
874 HasSSBS = true;
875 if (Feature == "+bti")
876 HasBTI = true;
877 if (Feature == "+wfxt")
878 HasWFxT = true;
879 if (Feature == "-fmv")
880 HasFMV = false;
881 if (Feature == "+crc")
882 HasCRC = true;
883 if (Feature == "+rcpc")
884 HasRCPC = true;
885 if (Feature == "+aes") {
886 FPU |= NeonMode;
887 HasAES = true;
888 }
889 if (Feature == "+sha2") {
890 FPU |= NeonMode;
891 HasSHA2 = true;
892 }
893 if (Feature == "+sha3") {
894 FPU |= NeonMode;
895 HasSHA2 = true;
896 HasSHA3 = true;
897 }
898 if (Feature == "+rdm") {
899 FPU |= NeonMode;
900 HasRDM = true;
901 }
902 if (Feature == "+dit")
903 HasDIT = true;
904 if (Feature == "+cccp")
905 HasCCPP = true;
906 if (Feature == "+ccdp") {
907 HasCCPP = true;
908 HasCCDP = true;
909 }
910 if (Feature == "+fptoint")
911 HasFRInt3264 = true;
912 if (Feature == "+sm4") {
913 FPU |= NeonMode;
914 HasSM4 = true;
915 }
916 if (Feature == "+strict-align")
917 HasUnaligned = false;
918 // All predecessor archs are added but select the latest one for ArchKind.
919 if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
920 ArchInfo = &llvm::AArch64::ARMV8A;
921 if (Feature == "+v8.1a" &&
922 ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
923 ArchInfo = &llvm::AArch64::ARMV8_1A;
924 if (Feature == "+v8.2a" &&
925 ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
926 ArchInfo = &llvm::AArch64::ARMV8_2A;
927 if (Feature == "+v8.3a" &&
928 ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
929 ArchInfo = &llvm::AArch64::ARMV8_3A;
930 if (Feature == "+v8.4a" &&
931 ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
932 ArchInfo = &llvm::AArch64::ARMV8_4A;
933 if (Feature == "+v8.5a" &&
934 ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
935 ArchInfo = &llvm::AArch64::ARMV8_5A;
936 if (Feature == "+v8.6a" &&
937 ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
938 ArchInfo = &llvm::AArch64::ARMV8_6A;
939 if (Feature == "+v8.7a" &&
940 ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
941 ArchInfo = &llvm::AArch64::ARMV8_7A;
942 if (Feature == "+v8.8a" &&
943 ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
944 ArchInfo = &llvm::AArch64::ARMV8_8A;
945 if (Feature == "+v8.9a" &&
946 ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
947 ArchInfo = &llvm::AArch64::ARMV8_9A;
948 if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
949 ArchInfo = &llvm::AArch64::ARMV9A;
950 if (Feature == "+v9.1a" &&
951 ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
952 ArchInfo = &llvm::AArch64::ARMV9_1A;
953 if (Feature == "+v9.2a" &&
954 ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
955 ArchInfo = &llvm::AArch64::ARMV9_2A;
956 if (Feature == "+v9.3a" &&
957 ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
958 ArchInfo = &llvm::AArch64::ARMV9_3A;
959 if (Feature == "+v9.4a" &&
960 ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
961 ArchInfo = &llvm::AArch64::ARMV9_4A;
962 if (Feature == "+v9.5a" &&
963 ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
964 ArchInfo = &llvm::AArch64::ARMV9_5A;
965 if (Feature == "+v8r")
966 ArchInfo = &llvm::AArch64::ARMV8R;
967 if (Feature == "+fullfp16") {
968 FPU |= NeonMode;
969 HasFullFP16 = true;
970 }
971 if (Feature == "+dotprod") {
972 FPU |= NeonMode;
973 HasDotProd = true;
974 }
975 if (Feature == "+fp16fml") {
976 FPU |= NeonMode;
977 HasFullFP16 = true;
978 HasFP16FML = true;
979 }
980 if (Feature == "+mte")
981 HasMTE = true;
982 if (Feature == "+tme")
983 HasTME = true;
984 if (Feature == "+pauth")
985 HasPAuth = true;
986 if (Feature == "+i8mm")
987 HasMatMul = true;
988 if (Feature == "+bf16")
989 HasBFloat16 = true;
990 if (Feature == "+lse")
991 HasLSE = true;
992 if (Feature == "+ls64")
993 HasLS64 = true;
994 if (Feature == "+rand")
995 HasRandGen = true;
996 if (Feature == "+flagm")
997 HasFlagM = true;
998 if (Feature == "+altnzcv") {
999 HasFlagM = true;
1000 HasAlternativeNZCV = true;
1001 }
1002 if (Feature == "+mops")
1003 HasMOPS = true;
1004 if (Feature == "+d128")
1005 HasD128 = true;
1006 if (Feature == "+gcs")
1007 HasGCS = true;
1008 if (Feature == "+rcpc3")
1009 HasRCPC3 = true;
1010 if (Feature == "+pauth-lr") {
1011 HasPAuthLR = true;
1012 HasPAuth = true;
1013 }
1014 }
1015
1016 // Check features that are manually disabled by command line options.
1017 // This needs to be checked after architecture-related features are handled,
1018 // making sure they are properly disabled when required.
1019 for (const auto &Feature : Features) {
1020 if (Feature == "-d128")
1021 HasD128 = false;
1022 }
1023
1024 setDataLayout();
1025 setArchFeatures();
1026
1027 if (HasNoFP) {
1028 FPU &= ~FPUMode;
1029 FPU &= ~NeonMode;
1030 FPU &= ~SveMode;
1031 }
1032 if (HasNoNeon) {
1033 FPU &= ~NeonMode;
1034 FPU &= ~SveMode;
1035 }
1036 if (HasNoSVE)
1037 FPU &= ~SveMode;
1038
1039 return true;
1040}
1041
// Build the feature map for the given CPU and explicit feature list,
// expanding CPU-implied extensions and per-feature dependencies before
// delegating to the generic TargetInfo implementation.
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(Name: CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Extensions: Exts, Features&: CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(x: F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Name: Feature.substr(pos: 1))) {
      // Expand the comma-separated dependency list so dependent features
      // precede the explicit features pushed by the second loop.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Name: Feature.substr(pos: 1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(A&: AttrFeatures, Separator: ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(x: F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        // Canonicalize "+<extension>" spellings (e.g. "+sve") to their
        // backend feature names when the extension is recognized.
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
            llvm::AArch64::parseArchExtension(Extension: Feature.substr(pos: 1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(x: UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec: UpdatedFeaturesVec);
}
1086
1087// Parse AArch64 Target attributes, which are a comma separated list of:
1088// "arch=<arch>" - parsed to features as per -march=..
1089// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1090// "tune=<cpu>" - TuneCPU set to <cpu>
1091// "feature", "no-feature" - Add (or remove) feature.
1092// "+feature", "+nofeature" - Add (or remove) feature.
// Parse the comma-separated clauses of a target("...") attribute into a
// ParsedTargetAttr. Recognized clauses: "arch=", "cpu=", "tune=",
// "branch-protection=", "+feat"/"+nofeat" and "feat"/"no-feat". Repeated
// arch=/cpu=/tune= clauses are recorded in Ret.Duplicate for later
// diagnosis; unknown feature names are passed through so Sema can reject
// them.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(A&: AttrFeatures, Separator: ",");
  bool FoundArch = false;

  // Turn a "+feat1+feat2..." suffix into backend feature strings, appending
  // them to Features.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(A&: SplitFeatures, Separator: StringRef("+"), MaxSplit: -1, KeepEmpty: false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(ArchExt: Feature);
      if (!FeatureName.empty())
        Features.push_back(x: FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with(Prefix: "no"))
          Features.push_back(x: "-" + Feature.drop_front(N: 2).str());
        else
          Features.push_back(x: "+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on AArch64.
    if (Feature.starts_with(Prefix: "fpmath="))
      continue;

    if (Feature.starts_with(Prefix: "branch-protection=")) {
      Ret.BranchProtection = Feature.split(Separator: '=').second.trim();
      continue;
    }

    if (Feature.starts_with(Prefix: "arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split(Separator: "=").second.trim().split(Separator: "+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Arch: Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(x: AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with(Prefix: "cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split(Separator: "=").second.trim().split(Separator: "+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with(Prefix: "tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split(Separator: "=").second.trim();
    } else if (Feature.starts_with(Prefix: "+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with(Prefix: "no-")) {
      // "no-feat": emit "-feat", translating through the extension table
      // when the name is recognized.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(ArchExt: Feature.split(Separator: "-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back(x: "-" + FeatureName.drop_front(N: 1).str());
      else
        Ret.Features.push_back(x: "-" + Feature.split(Separator: "-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(ArchExt: Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(x: FeatureName.str());
      else
        Ret.Features.push_back(x: "+" + Feature.str());
    }
  }
  return Ret;
}
1182
1183bool AArch64TargetInfo::hasBFloat16Type() const {
1184 return true;
1185}
1186
1187TargetInfo::CallingConvCheckResult
1188AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1189 switch (CC) {
1190 case CC_C:
1191 case CC_Swift:
1192 case CC_SwiftAsync:
1193 case CC_PreserveMost:
1194 case CC_PreserveAll:
1195 case CC_OpenCLKernel:
1196 case CC_AArch64VectorCall:
1197 case CC_AArch64SVEPCS:
1198 case CC_Win64:
1199 return CCCR_OK;
1200 default:
1201 return CCCR_Warning;
1202 }
1203}
1204
1205bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1206
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  // Use the AAPCS64 struct-based va_list rather than a plain char*.
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1210
// Register names accepted in GCC-style inline assembly and register
// variables. Aliases are listed separately in GCCRegAliases below.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};
1257
1258ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1259 return llvm::ArrayRef(GCCRegNames);
1260}
1261
// Alternate spellings mapped onto the canonical names in GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    // w31/x31 are accepted as aliases for the stack pointer registers.
    {.Aliases: {"w31"}, .Register: "wsp"},
    {.Aliases: {"x31"}, .Register: "sp"},
    // GCC rN registers are aliases of xN registers.
    {.Aliases: {"r0"}, .Register: "x0"},
    {.Aliases: {"r1"}, .Register: "x1"},
    {.Aliases: {"r2"}, .Register: "x2"},
    {.Aliases: {"r3"}, .Register: "x3"},
    {.Aliases: {"r4"}, .Register: "x4"},
    {.Aliases: {"r5"}, .Register: "x5"},
    {.Aliases: {"r6"}, .Register: "x6"},
    {.Aliases: {"r7"}, .Register: "x7"},
    {.Aliases: {"r8"}, .Register: "x8"},
    {.Aliases: {"r9"}, .Register: "x9"},
    {.Aliases: {"r10"}, .Register: "x10"},
    {.Aliases: {"r11"}, .Register: "x11"},
    {.Aliases: {"r12"}, .Register: "x12"},
    {.Aliases: {"r13"}, .Register: "x13"},
    {.Aliases: {"r14"}, .Register: "x14"},
    {.Aliases: {"r15"}, .Register: "x15"},
    {.Aliases: {"r16"}, .Register: "x16"},
    {.Aliases: {"r17"}, .Register: "x17"},
    {.Aliases: {"r18"}, .Register: "x18"},
    {.Aliases: {"r19"}, .Register: "x19"},
    {.Aliases: {"r20"}, .Register: "x20"},
    {.Aliases: {"r21"}, .Register: "x21"},
    {.Aliases: {"r22"}, .Register: "x22"},
    {.Aliases: {"r23"}, .Register: "x23"},
    {.Aliases: {"r24"}, .Register: "x24"},
    {.Aliases: {"r25"}, .Register: "x25"},
    {.Aliases: {"r26"}, .Register: "x26"},
    {.Aliases: {"r27"}, .Register: "x27"},
    {.Aliases: {"r28"}, .Register: "x28"},
    {.Aliases: {"r29", "x29"}, .Register: "fp"},
    {.Aliases: {"r30", "x30"}, .Register: "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1300
1301ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1302 return llvm::ArrayRef(GCCRegAliases);
1303}
1304
// Returns the length (always 5) of an exact "@cc<cond>" condition-code
// constraint, or 0 if Name is not one. Name must be exactly the five
// characters with no trailing text, matching the previous full-string
// comparison behavior.
static unsigned matchAsmCCConstraint(const char *Name) {
  static constexpr std::string_view CondCodes[] = {
      "eq", "ne", "hs", "cs", "cc", "lo", "mi", "pl",
      "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le"};
  const std::string_view S(Name);
  if (S.size() != 5 || S.substr(0, 3) != "@cc")
    return 0;
  const std::string_view Cond = S.substr(3);
  for (std::string_view C : CondCodes)
    if (Cond == C)
      return 5;
  return 0;
}
1328
// Convert a target-specific inline-asm constraint into the canonical form
// consumed by the backend. Constraint is advanced past all but the last
// character of any multi-character constraint consumed here; the caller is
// expected to advance past the final character.
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // "@cc<cond>" condition-code constraints become "{@cc<cond>}".
    if (const unsigned Len = matchAsmCCConstraint(Name: Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Not a recognized @-constraint: pass the single character through.
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1350
// Validate a single (possibly multi-character) inline-asm constraint,
// recording in Info whether it allows a register and/or memory operand.
// For multi-character constraints ('U...', '@cc...'), Name is advanced
// past all but the first character.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
  }
  return false;
}
1415
1416bool AArch64TargetInfo::validateConstraintModifier(
1417 StringRef Constraint, char Modifier, unsigned Size,
1418 std::string &SuggestedModifier) const {
1419 // Strip off constraint modifiers.
1420 Constraint = Constraint.ltrim(Chars: "=+&");
1421
1422 switch (Constraint[0]) {
1423 default:
1424 return true;
1425 case 'z':
1426 case 'r': {
1427 switch (Modifier) {
1428 case 'x':
1429 case 'w':
1430 // For now assume that the person knows what they're
1431 // doing with the modifier.
1432 return true;
1433 default:
1434 // By default an 'r' constraint will be in the 'x'
1435 // registers.
1436 if (Size == 64)
1437 return true;
1438
1439 if (Size == 512)
1440 return HasLS64;
1441
1442 SuggestedModifier = "w";
1443 return false;
1444 }
1445 }
1446 }
1447}
1448
1449std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1450
1451int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1452 if (RegNo == 0)
1453 return 0;
1454 if (RegNo == 1)
1455 return 1;
1456 return -1;
1457}
1458
1459bool AArch64TargetInfo::hasInt128Type() const { return true; }
1460
// Little-endian AArch64; all target properties come from the common base.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1464
1465void AArch64leTargetInfo::setDataLayout() {
1466 if (getTriple().isOSBinFormatMachO()) {
1467 if(getTriple().isArch32Bit())
1468 resetDataLayout(DL: "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", UserLabelPrefix: "_");
1469 else
1470 resetDataLayout(DL: "e-m:o-i64:64-i128:128-n32:64-S128", UserLabelPrefix: "_");
1471 } else
1472 resetDataLayout(DL: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1473}
1474
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Mark the target little-endian, then add the common AArch64 macros.
  Builder.defineMacro(Name: "__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1480
// Big-endian AArch64; all target properties come from the common base.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1484
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Define all three big-endian spellings (AArch64 and legacy ARM forms),
  // then add the common AArch64 macros.
  Builder.defineMacro(Name: "__AARCH64EB__");
  Builder.defineMacro(Name: "__AARCH_BIG_ENDIAN");
  Builder.defineMacro(Name: "__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1492
void AArch64beTargetInfo::setDataLayout() {
  // Mach-O is little-endian only; a big-endian Mach-O triple is invalid.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout(DL: "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1497
// Common base for all Windows-on-ARM64 targets (MSVC and MinGW flavors).
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // All 64-bit and pointer-sized integer types are "long long" based,
  // since "long" is only 32 bits under LLP64.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1515
1516void WindowsARM64TargetInfo::setDataLayout() {
1517 resetDataLayout(DL: Triple.isOSBinFormatMachO()
1518 ? "e-m:o-i64:64-i128:128-n32:64-S128"
1519 : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1520 UserLabelPrefix: Triple.isOSBinFormatMachO() ? "_" : "");
1521}
1522
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  // Windows on ARM64 uses a plain char* va_list, not the AAPCS64 struct.
  return TargetInfo::CharPtrBuiltinVaList;
}
1527
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  // x86-specific conventions are silently ignored (not warned about) for
  // source compatibility with code written for x86 Windows.
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
1548
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MSVC-compatible targets use the Microsoft C++ ABI.
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1554
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC defines the x64 architecture macros alongside _M_ARM64EC,
    // matching MSVC.
    Builder.defineMacro(Name: "_M_X64", Value: "100");
    Builder.defineMacro(Name: "_M_AMD64", Value: "100");
    Builder.defineMacro(Name: "_M_ARM64EC", Value: "1");
  } else {
    Builder.defineMacro(Name: "_M_ARM64", Value: "1");
  }
}
1566
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // The Microsoft Win64 convention applies regardless of -fclang-abi-compat.
  return CCK_MicrosoftWin64;
}
1571
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
                                                     bool HasNonWeakDef) const {
  unsigned Align =
      WindowsARM64TargetInfo::getMinGlobalAlign(Size: TypeSize, HasNonWeakDef);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(a: Align, b: 128u); // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(a: Align, b: 64u);  // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(a: Align, b: 32u);  // align type at least 4 bytes
  }
  return Align;
}
1590
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // Unlike MSVC targets, MinGW uses the generic (Itanium-style) AArch64
  // C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1596
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // Even on 32-bit (arm64_32) triples, intmax_t stays 64 bits wide.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // Darwin AArch64 uses a 64-bit (IEEE double) long double.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 (32-bit watchOS) keeps 32-bit ARM bitfield layout rules and
    // the WatchOS C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1620
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__AARCH64_SIMD__");
  // The architecture macro distinguishes arm64_32 (32-bit pointers) from
  // regular 64-bit arm64.
  if (Triple.isArch32Bit())
    Builder.defineMacro(Name: "__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro(Name: "__ARM64_ARCH_8__");
  Builder.defineMacro(Name: "__ARM_NEON__");
  Builder.defineMacro(Name: "__REGISTER_PREFIX__", Value: "");
  Builder.defineMacro(Name: "__arm64", Value: "1");
  Builder.defineMacro(Name: "__arm64__", Value: "1");

  // arm64e triples additionally define __arm64e__.
  if (Triple.isArm64e())
    Builder.defineMacro(Name: "__arm64e__", Value: "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1639
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  // Darwin uses a plain char* va_list, not the AAPCS64 struct form.
  return TargetInfo::CharPtrBuiltinVaList;
}
1644
1645// 64-bit RenderScript is aarch64
// 64-bit RenderScript compiles as little-endian AArch64: the architecture
// component of the triple is replaced with "aarch64" while keeping the
// original vendor/OS/environment.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  // Flag consulted elsewhere to enable RenderScript-specific behavior.
  IsRenderScriptTarget = true;
}
1654
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Expose __RENDERSCRIPT__ in addition to the usual AArch64 macros.
  Builder.defineMacro(Name: "__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1660

// End of clang/lib/Basic/Targets/AArch64.cpp