1//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements AArch64 TargetInfo objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64.h"
14#include "clang/Basic/Diagnostic.h"
15#include "clang/Basic/LangOptions.h"
16#include "clang/Basic/TargetBuiltins.h"
17#include "clang/Basic/TargetInfo.h"
18#include "llvm/ADT/APSInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/ADT/StringSwitch.h"
22#include "llvm/TargetParser/AArch64TargetParser.h"
23#include "llvm/TargetParser/ARMTargetParserCommon.h"
24#include <optional>
25
26using namespace clang;
27using namespace clang::targets;
28
// Static table describing every builtin supported by the AArch64 target.
// It is assembled by expanding the NEON, SVE, SME and AArch64 builtin .def
// files, in that order. Each section re-defines the expansion macros; the
// .def files are expected to #undef them on exit — TODO confirm.
static constexpr Builtin::Info BuiltinInfo[] = {
// Plain builtins: no required target feature, no header, all languages.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
// Target builtins: only available when FEATURE is enabled.
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

// AArch64-specific builtins additionally support language-restricted and
// header-declared entries.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
58
59void AArch64TargetInfo::setArchFeatures() {
60 if (*ArchInfo == llvm::AArch64::ARMV8R) {
61 HasDotProd = true;
62 HasDIT = true;
63 HasFlagM = true;
64 HasRCPC = true;
65 FPU |= NeonMode;
66 HasCCPP = true;
67 HasCRC = true;
68 HasLSE = true;
69 HasRDM = true;
70 } else if (ArchInfo->Version.getMajor() == 8) {
71 if (ArchInfo->Version.getMinor() >= 7u) {
72 HasWFxT = true;
73 }
74 if (ArchInfo->Version.getMinor() >= 6u) {
75 HasBFloat16 = true;
76 HasMatMul = true;
77 }
78 if (ArchInfo->Version.getMinor() >= 5u) {
79 HasAlternativeNZCV = true;
80 HasFRInt3264 = true;
81 HasSSBS = true;
82 HasSB = true;
83 HasPredRes = true;
84 HasBTI = true;
85 }
86 if (ArchInfo->Version.getMinor() >= 4u) {
87 HasDotProd = true;
88 HasDIT = true;
89 HasFlagM = true;
90 }
91 if (ArchInfo->Version.getMinor() >= 3u) {
92 HasRCPC = true;
93 FPU |= NeonMode;
94 }
95 if (ArchInfo->Version.getMinor() >= 2u) {
96 HasCCPP = true;
97 }
98 if (ArchInfo->Version.getMinor() >= 1u) {
99 HasCRC = true;
100 HasLSE = true;
101 HasRDM = true;
102 }
103 } else if (ArchInfo->Version.getMajor() == 9) {
104 if (ArchInfo->Version.getMinor() >= 2u) {
105 HasWFxT = true;
106 }
107 if (ArchInfo->Version.getMinor() >= 1u) {
108 HasBFloat16 = true;
109 HasMatMul = true;
110 }
111 FPU |= SveMode;
112 HasSVE2 = true;
113 HasFullFP16 = true;
114 HasAlternativeNZCV = true;
115 HasFRInt3264 = true;
116 HasSSBS = true;
117 HasSB = true;
118 HasPredRes = true;
119 HasBTI = true;
120 HasDotProd = true;
121 HasDIT = true;
122 HasFlagM = true;
123 HasRCPC = true;
124 FPU |= NeonMode;
125 HasCCPP = true;
126 HasCRC = true;
127 HasLSE = true;
128 HasRDM = true;
129 }
130}
131
// Constructor: establishes the AArch64 type model, ABI defaults and
// target-independent capabilities before any feature flags are processed.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OS-specific adjustments to the base C type model.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // AAPCS64 uses unsigned wchar_t except on Darwin and NetBSD.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  // ILP32 (arm64_32-style) triples keep 32-bit pointers and longs.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 on AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available. Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  HasUnalignedAccess = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling counter symbol name differs by OS/EABI convention.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
202
203StringRef AArch64TargetInfo::getABI() const { return ABI; }
204
205bool AArch64TargetInfo::setABI(const std::string &Name) {
206 if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs")
207 return false;
208
209 ABI = Name;
210 return true;
211}
212
213bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
214 if (hasFeature(Feature: "fp") && ABI == "aapcs-soft") {
215 // aapcs-soft is not allowed for targets with an FPU, to avoid there being
216 // two incomatible ABIs.
217 Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
218 return false;
219 }
220 return true;
221}
222
223bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
224 BranchProtectionInfo &BPI,
225 StringRef &Err) const {
226 llvm::ARM::ParsedBranchProtection PBP;
227 if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
228 return false;
229
230 BPI.SignReturnAddr =
231 llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
232 .Case(S: "non-leaf", Value: LangOptions::SignReturnAddressScopeKind::NonLeaf)
233 .Case(S: "all", Value: LangOptions::SignReturnAddressScopeKind::All)
234 .Default(Value: LangOptions::SignReturnAddressScopeKind::None);
235
236 if (PBP.Key == "a_key")
237 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
238 else
239 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
240
241 BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
242 BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
243 BPI.GuardedControlStack = PBP.GuardedControlStack;
244 return true;
245}
246
247bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
248 return Name == "generic" || llvm::AArch64::parseCpu(Name);
249}
250
// Accepts the CPU name if it is valid; no state beyond the base class is
// recorded here.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
254
// Populates Values with all CPU/arch names the TargetParser accepts, for
// driver diagnostics and tab-completion.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
259
260void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
261 MacroBuilder &Builder) const {
262 Builder.defineMacro(Name: "__ARM_FEATURE_QRDMX", Value: "1");
263}
264
// Macros introduced by Armv8.2-A (none of its own here).
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
270
271void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
272 MacroBuilder &Builder) const {
273 Builder.defineMacro(Name: "__ARM_FEATURE_COMPLEX", Value: "1");
274 Builder.defineMacro(Name: "__ARM_FEATURE_JCVT", Value: "1");
275 // Also include the Armv8.2 defines
276 getTargetDefinesARMV82A(Opts, Builder);
277}
278
// Macros introduced by Armv8.4-A (none of its own here).
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
284
285void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
286 MacroBuilder &Builder) const {
287 Builder.defineMacro(Name: "__ARM_FEATURE_FRINT", Value: "1");
288 Builder.defineMacro(Name: "__ARM_FEATURE_BTI", Value: "1");
289 // Also include the Armv8.4 defines
290 getTargetDefinesARMV84A(Opts, Builder);
291}
292
// Macros introduced by Armv8.6-A.
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
302
// Macros introduced by Armv8.7-A (none of its own here).
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
308
// Macros introduced by Armv8.8-A (none of its own here).
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
314
// Macros introduced by Armv8.9-A (none of its own here).
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}
320
// Armv9-A macro set, expressed via its Armv8 equivalent.
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
326
// Armv9.1-A macro set, expressed via its Armv8 equivalent.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
332
// Armv9.2-A macro set, expressed via its Armv8 equivalent.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
338
// Armv9.3-A macro set, expressed via its Armv8 equivalent.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
344
// Armv9.4-A macro set, expressed via its Armv8 equivalent.
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}
350
// Armv9.5-A macro set.
void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
356
357void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
358 MacroBuilder &Builder) const {
359 // Target identification.
360 if (getTriple().isWindowsArm64EC()) {
361 // Define the same set of macros as would be defined on x86_64 to ensure that
362 // ARM64EC datatype layouts match those of x86_64 compiled code
363 Builder.defineMacro(Name: "__amd64__");
364 Builder.defineMacro(Name: "__amd64");
365 Builder.defineMacro(Name: "__x86_64");
366 Builder.defineMacro(Name: "__x86_64__");
367 Builder.defineMacro(Name: "__arm64ec__");
368 } else {
369 Builder.defineMacro(Name: "__aarch64__");
370 }
371
372 // Inline assembly supports AArch64 flag outputs.
373 Builder.defineMacro(Name: "__GCC_ASM_FLAG_OUTPUTS__");
374
375 std::string CodeModel = getTargetOpts().CodeModel;
376 if (CodeModel == "default")
377 CodeModel = "small";
378 for (char &c : CodeModel)
379 c = toupper(c: c);
380 Builder.defineMacro(Name: "__AARCH64_CMODEL_" + CodeModel + "__");
381
382 // ACLE predefines. Many can only have one possible value on v8 AArch64.
383 Builder.defineMacro(Name: "__ARM_ACLE", Value: "200");
384 Builder.defineMacro(Name: "__ARM_ARCH",
385 Value: std::to_string(val: ArchInfo->Version.getMajor()));
386 Builder.defineMacro(Name: "__ARM_ARCH_PROFILE",
387 Value: std::string("'") + (char)ArchInfo->Profile + "'");
388
389 Builder.defineMacro(Name: "__ARM_64BIT_STATE", Value: "1");
390 Builder.defineMacro(Name: "__ARM_PCS_AAPCS64", Value: "1");
391 Builder.defineMacro(Name: "__ARM_ARCH_ISA_A64", Value: "1");
392
393 Builder.defineMacro(Name: "__ARM_FEATURE_CLZ", Value: "1");
394 Builder.defineMacro(Name: "__ARM_FEATURE_FMA", Value: "1");
395 Builder.defineMacro(Name: "__ARM_FEATURE_LDREX", Value: "0xF");
396 Builder.defineMacro(Name: "__ARM_FEATURE_IDIV", Value: "1"); // As specified in ACLE
397 Builder.defineMacro(Name: "__ARM_FEATURE_DIV"); // For backwards compatibility
398 Builder.defineMacro(Name: "__ARM_FEATURE_NUMERIC_MAXMIN", Value: "1");
399 Builder.defineMacro(Name: "__ARM_FEATURE_DIRECTED_ROUNDING", Value: "1");
400
401 Builder.defineMacro(Name: "__ARM_ALIGN_MAX_STACK_PWR", Value: "4");
402
403 // These macros are set when Clang can parse declarations with these
404 // attributes.
405 Builder.defineMacro(Name: "__ARM_STATE_ZA", Value: "1");
406 Builder.defineMacro(Name: "__ARM_STATE_ZT0", Value: "1");
407
408 // 0xe implies support for half, single and double precision operations.
409 if (FPU & FPUMode)
410 Builder.defineMacro(Name: "__ARM_FP", Value: "0xE");
411
412 // PCS specifies this for SysV variants, which is all we support. Other ABIs
413 // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
414 Builder.defineMacro(Name: "__ARM_FP16_FORMAT_IEEE", Value: "1");
415 Builder.defineMacro(Name: "__ARM_FP16_ARGS", Value: "1");
416
417 if (Opts.UnsafeFPMath)
418 Builder.defineMacro(Name: "__ARM_FP_FAST", Value: "1");
419
420 Builder.defineMacro(Name: "__ARM_SIZEOF_WCHAR_T",
421 Value: Twine(Opts.WCharSize ? Opts.WCharSize : 4));
422
423 Builder.defineMacro(Name: "__ARM_SIZEOF_MINIMAL_ENUM", Value: Opts.ShortEnums ? "1" : "4");
424
425 if (FPU & NeonMode) {
426 Builder.defineMacro(Name: "__ARM_NEON", Value: "1");
427 // 64-bit NEON supports half, single and double precision operations.
428 Builder.defineMacro(Name: "__ARM_NEON_FP", Value: "0xE");
429 }
430
431 if (FPU & SveMode)
432 Builder.defineMacro(Name: "__ARM_FEATURE_SVE", Value: "1");
433
434 if ((FPU & NeonMode) && (FPU & SveMode))
435 Builder.defineMacro(Name: "__ARM_NEON_SVE_BRIDGE", Value: "1");
436
437 if (HasSVE2)
438 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2", Value: "1");
439
440 if (HasSVE2 && HasSVE2AES)
441 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_AES", Value: "1");
442
443 if (HasSVE2 && HasSVE2BitPerm)
444 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_BITPERM", Value: "1");
445
446 if (HasSVE2 && HasSVE2SHA3)
447 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SHA3", Value: "1");
448
449 if (HasSVE2 && HasSVE2SM4)
450 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SM4", Value: "1");
451
452 if (HasSME) {
453 Builder.defineMacro(Name: "__ARM_FEATURE_SME");
454 Builder.defineMacro(Name: "__ARM_FEATURE_LOCALLY_STREAMING", Value: "1");
455 }
456
457 if (HasSME2) {
458 Builder.defineMacro(Name: "__ARM_FEATURE_SME");
459 Builder.defineMacro(Name: "__ARM_FEATURE_SME2");
460 Builder.defineMacro(Name: "__ARM_FEATURE_LOCALLY_STREAMING", Value: "1");
461 }
462
463 if (HasCRC)
464 Builder.defineMacro(Name: "__ARM_FEATURE_CRC32", Value: "1");
465
466 if (HasRCPC3)
467 Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "3");
468 else if (HasRCPC)
469 Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "1");
470
471 if (HasFMV)
472 Builder.defineMacro(Name: "__HAVE_FUNCTION_MULTI_VERSIONING", Value: "1");
473
474 // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
475 // macros for AES, SHA2, SHA3 and SM4
476 if (HasAES && HasSHA2)
477 Builder.defineMacro(Name: "__ARM_FEATURE_CRYPTO", Value: "1");
478
479 if (HasAES)
480 Builder.defineMacro(Name: "__ARM_FEATURE_AES", Value: "1");
481
482 if (HasSHA2)
483 Builder.defineMacro(Name: "__ARM_FEATURE_SHA2", Value: "1");
484
485 if (HasSHA3) {
486 Builder.defineMacro(Name: "__ARM_FEATURE_SHA3", Value: "1");
487 Builder.defineMacro(Name: "__ARM_FEATURE_SHA512", Value: "1");
488 }
489
490 if (HasSM4) {
491 Builder.defineMacro(Name: "__ARM_FEATURE_SM3", Value: "1");
492 Builder.defineMacro(Name: "__ARM_FEATURE_SM4", Value: "1");
493 }
494
495 if (HasPAuth)
496 Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH", Value: "1");
497
498 if (HasPAuthLR)
499 Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH_LR", Value: "1");
500
501 if (HasUnalignedAccess)
502 Builder.defineMacro(Name: "__ARM_FEATURE_UNALIGNED", Value: "1");
503
504 if ((FPU & NeonMode) && HasFullFP16)
505 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", Value: "1");
506 if (HasFullFP16)
507 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", Value: "1");
508
509 if (HasDotProd)
510 Builder.defineMacro(Name: "__ARM_FEATURE_DOTPROD", Value: "1");
511
512 if (HasMTE)
513 Builder.defineMacro(Name: "__ARM_FEATURE_MEMORY_TAGGING", Value: "1");
514
515 if (HasTME)
516 Builder.defineMacro(Name: "__ARM_FEATURE_TME", Value: "1");
517
518 if (HasMatMul)
519 Builder.defineMacro(Name: "__ARM_FEATURE_MATMUL_INT8", Value: "1");
520
521 if (HasLSE)
522 Builder.defineMacro(Name: "__ARM_FEATURE_ATOMICS", Value: "1");
523
524 if (HasBFloat16) {
525 Builder.defineMacro(Name: "__ARM_FEATURE_BF16", Value: "1");
526 Builder.defineMacro(Name: "__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", Value: "1");
527 Builder.defineMacro(Name: "__ARM_BF16_FORMAT_ALTERNATIVE", Value: "1");
528 Builder.defineMacro(Name: "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", Value: "1");
529 }
530
531 if ((FPU & SveMode) && HasBFloat16) {
532 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BF16", Value: "1");
533 }
534
535 if ((FPU & SveMode) && HasMatmulFP64)
536 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP64", Value: "1");
537
538 if ((FPU & SveMode) && HasMatmulFP32)
539 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP32", Value: "1");
540
541 if ((FPU & SveMode) && HasMatMul)
542 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_INT8", Value: "1");
543
544 if ((FPU & NeonMode) && HasFP16FML)
545 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_FML", Value: "1");
546
547 if (Opts.hasSignReturnAddress()) {
548 // Bitmask:
549 // 0: Protection using the A key
550 // 1: Protection using the B key
551 // 2: Protection including leaf functions
552 // 3: Protection using PC as a diversifier
553 unsigned Value = 0;
554
555 if (Opts.isSignReturnAddressWithAKey())
556 Value |= (1 << 0);
557 else
558 Value |= (1 << 1);
559
560 if (Opts.isSignReturnAddressScopeAll())
561 Value |= (1 << 2);
562
563 if (Opts.BranchProtectionPAuthLR)
564 Value |= (1 << 3);
565
566 Builder.defineMacro(Name: "__ARM_FEATURE_PAC_DEFAULT", Value: std::to_string(val: Value));
567 }
568
569 if (Opts.BranchTargetEnforcement)
570 Builder.defineMacro(Name: "__ARM_FEATURE_BTI_DEFAULT", Value: "1");
571
572 if (Opts.GuardedControlStack)
573 Builder.defineMacro(Name: "__ARM_FEATURE_GCS_DEFAULT", Value: "1");
574
575 if (HasLS64)
576 Builder.defineMacro(Name: "__ARM_FEATURE_LS64", Value: "1");
577
578 if (HasRandGen)
579 Builder.defineMacro(Name: "__ARM_FEATURE_RNG", Value: "1");
580
581 if (HasMOPS)
582 Builder.defineMacro(Name: "__ARM_FEATURE_MOPS", Value: "1");
583
584 if (HasD128)
585 Builder.defineMacro(Name: "__ARM_FEATURE_SYSREG128", Value: "1");
586
587 if (HasGCS)
588 Builder.defineMacro(Name: "__ARM_FEATURE_GCS", Value: "1");
589
590 if (*ArchInfo == llvm::AArch64::ARMV8_1A)
591 getTargetDefinesARMV81A(Opts, Builder);
592 else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
593 getTargetDefinesARMV82A(Opts, Builder);
594 else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
595 getTargetDefinesARMV83A(Opts, Builder);
596 else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
597 getTargetDefinesARMV84A(Opts, Builder);
598 else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
599 getTargetDefinesARMV85A(Opts, Builder);
600 else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
601 getTargetDefinesARMV86A(Opts, Builder);
602 else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
603 getTargetDefinesARMV87A(Opts, Builder);
604 else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
605 getTargetDefinesARMV88A(Opts, Builder);
606 else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
607 getTargetDefinesARMV89A(Opts, Builder);
608 else if (*ArchInfo == llvm::AArch64::ARMV9A)
609 getTargetDefinesARMV9A(Opts, Builder);
610 else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
611 getTargetDefinesARMV91A(Opts, Builder);
612 else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
613 getTargetDefinesARMV92A(Opts, Builder);
614 else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
615 getTargetDefinesARMV93A(Opts, Builder);
616 else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
617 getTargetDefinesARMV94A(Opts, Builder);
618 else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
619 getTargetDefinesARMV95A(Opts, Builder);
620
621 // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
622 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
623 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
624 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
625 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
626 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
627
628 // Allow detection of fast FMA support.
629 Builder.defineMacro(Name: "__FP_FAST_FMA", Value: "1");
630 Builder.defineMacro(Name: "__FP_FAST_FMAF", Value: "1");
631
632 // C/C++ operators work on both VLS and VLA SVE types
633 if (FPU & SveMode)
634 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_VECTOR_OPERATORS", Value: "2");
635
636 if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
637 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BITS", Value: Twine(Opts.VScaleMin * 128));
638 }
639}
640
// Exposes the static builtin table; the length covers the target-specific
// builtin ID range reserved for AArch64.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}
645
646std::optional<std::pair<unsigned, unsigned>>
647AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
648 if (LangOpts.VScaleMin || LangOpts.VScaleMax)
649 return std::pair<unsigned, unsigned>(
650 LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
651
652 if (hasFeature(Feature: "sve"))
653 return std::pair<unsigned, unsigned>(1, 16);
654
655 return std::nullopt;
656}
657
658unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
659 if (Name == "default")
660 return 0;
661 if (auto Ext = llvm::AArch64::parseArchExtension(Extension: Name))
662 return Ext->FmvPriority;
663 return 0;
664}
665
// Weight of a single feature when scoring multi-versioned candidates.
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
670
671bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
672 if (auto Ext = llvm::AArch64::parseArchExtension(Extension: Name))
673 return !Ext->DependentFeatures.empty();
674 return false;
675}
676
677StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
678 if (auto Ext = llvm::AArch64::parseArchExtension(Extension: Name))
679 return Ext->DependentFeatures;
680 return StringRef();
681}
682
683bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
684 // CPU features might be separated by '+', extract them and check
685 llvm::SmallVector<StringRef, 8> Features;
686 FeatureStr.split(A&: Features, Separator: "+");
687 for (auto &Feature : Features)
688 if (!llvm::AArch64::parseArchExtension(Extension: Feature.trim()).has_value())
689 return false;
690 return true;
691}
692
693bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
694 return llvm::StringSwitch<bool>(Feature)
695 .Cases(S0: "aarch64", S1: "arm64", S2: "arm", Value: true)
696 .Case(S: "fmv", Value: HasFMV)
697 .Case(S: "fp", Value: FPU & FPUMode)
698 .Cases(S0: "neon", S1: "simd", Value: FPU & NeonMode)
699 .Case(S: "jscvt", Value: HasJSCVT)
700 .Case(S: "fcma", Value: HasFCMA)
701 .Case(S: "rng", Value: HasRandGen)
702 .Case(S: "flagm", Value: HasFlagM)
703 .Case(S: "flagm2", Value: HasAlternativeNZCV)
704 .Case(S: "fp16fml", Value: HasFP16FML)
705 .Case(S: "dotprod", Value: HasDotProd)
706 .Case(S: "sm4", Value: HasSM4)
707 .Case(S: "rdm", Value: HasRDM)
708 .Case(S: "lse", Value: HasLSE)
709 .Case(S: "crc", Value: HasCRC)
710 .Case(S: "sha2", Value: HasSHA2)
711 .Case(S: "sha3", Value: HasSHA3)
712 .Cases(S0: "aes", S1: "pmull", Value: HasAES)
713 .Cases(S0: "fp16", S1: "fullfp16", Value: HasFullFP16)
714 .Case(S: "dit", Value: HasDIT)
715 .Case(S: "dpb", Value: HasCCPP)
716 .Case(S: "dpb2", Value: HasCCDP)
717 .Case(S: "rcpc", Value: HasRCPC)
718 .Case(S: "frintts", Value: HasFRInt3264)
719 .Case(S: "i8mm", Value: HasMatMul)
720 .Case(S: "bf16", Value: HasBFloat16)
721 .Case(S: "sve", Value: FPU & SveMode)
722 .Case(S: "sve-bf16", Value: FPU & SveMode && HasBFloat16)
723 .Case(S: "sve-i8mm", Value: FPU & SveMode && HasMatMul)
724 .Case(S: "f32mm", Value: FPU & SveMode && HasMatmulFP32)
725 .Case(S: "f64mm", Value: FPU & SveMode && HasMatmulFP64)
726 .Case(S: "sve2", Value: FPU & SveMode && HasSVE2)
727 .Case(S: "sve2-pmull128", Value: FPU & SveMode && HasSVE2AES)
728 .Case(S: "sve2-bitperm", Value: FPU & SveMode && HasSVE2BitPerm)
729 .Case(S: "sve2-sha3", Value: FPU & SveMode && HasSVE2SHA3)
730 .Case(S: "sve2-sm4", Value: FPU & SveMode && HasSVE2SM4)
731 .Case(S: "sme", Value: HasSME)
732 .Case(S: "sme2", Value: HasSME2)
733 .Case(S: "sme-f64f64", Value: HasSMEF64F64)
734 .Case(S: "sme-i16i64", Value: HasSMEI16I64)
735 .Case(S: "sme-fa64", Value: HasSMEFA64)
736 .Cases(S0: "memtag", S1: "memtag2", Value: HasMTE)
737 .Case(S: "sb", Value: HasSB)
738 .Case(S: "predres", Value: HasPredRes)
739 .Cases(S0: "ssbs", S1: "ssbs2", Value: HasSSBS)
740 .Case(S: "bti", Value: HasBTI)
741 .Cases(S0: "ls64", S1: "ls64_v", S2: "ls64_accdata", Value: HasLS64)
742 .Case(S: "wfxt", Value: HasWFxT)
743 .Case(S: "rcpc3", Value: HasRCPC3)
744 .Default(Value: false);
745}
746
747void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
748 StringRef Name, bool Enabled) const {
749 Features[Name] = Enabled;
750 // If the feature is an architecture feature (like v8.2a), add all previous
751 // architecture versions and any dependant target features.
752 const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
753 llvm::AArch64::ArchInfo::findBySubArch(SubArch: Name);
754
755 if (!ArchInfo)
756 return; // Not an architecture, nothing more to do.
757
758 // Disabling an architecture feature does not affect dependent features
759 if (!Enabled)
760 return;
761
762 for (const auto *OtherArch : llvm::AArch64::ArchInfos)
763 if (ArchInfo->implies(Other: *OtherArch))
764 Features[OtherArch->getSubArch()] = true;
765
766 // Set any features implied by the architecture
767 std::vector<StringRef> CPUFeats;
768 if (llvm::AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: CPUFeats)) {
769 for (auto F : CPUFeats) {
770 assert(F[0] == '+' && "Expected + in target feature!");
771 Features[F.drop_front(N: 1)] = true;
772 }
773 }
774}
775
776bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
777 DiagnosticsEngine &Diags) {
778 for (const auto &Feature : Features) {
779 if (Feature == "-fp-armv8")
780 HasNoFP = true;
781 if (Feature == "-neon")
782 HasNoNeon = true;
783 if (Feature == "-sve")
784 HasNoSVE = true;
785
786 if (Feature == "+neon" || Feature == "+fp-armv8")
787 FPU |= NeonMode;
788 if (Feature == "+jscvt") {
789 HasJSCVT = true;
790 FPU |= NeonMode;
791 }
792 if (Feature == "+fcma") {
793 HasFCMA = true;
794 FPU |= NeonMode;
795 }
796
797 if (Feature == "+sve") {
798 FPU |= NeonMode;
799 FPU |= SveMode;
800 HasFullFP16 = true;
801 }
802 if (Feature == "+sve2") {
803 FPU |= NeonMode;
804 FPU |= SveMode;
805 HasFullFP16 = true;
806 HasSVE2 = true;
807 }
808 if (Feature == "+sve2-aes") {
809 FPU |= NeonMode;
810 FPU |= SveMode;
811 HasFullFP16 = true;
812 HasSVE2 = true;
813 HasSVE2AES = true;
814 }
815 if (Feature == "+sve2-sha3") {
816 FPU |= NeonMode;
817 FPU |= SveMode;
818 HasFullFP16 = true;
819 HasSVE2 = true;
820 HasSVE2SHA3 = true;
821 }
822 if (Feature == "+sve2-sm4") {
823 FPU |= NeonMode;
824 FPU |= SveMode;
825 HasFullFP16 = true;
826 HasSVE2 = true;
827 HasSVE2SM4 = true;
828 }
829 if (Feature == "+sve2-bitperm") {
830 FPU |= NeonMode;
831 FPU |= SveMode;
832 HasFullFP16 = true;
833 HasSVE2 = true;
834 HasSVE2BitPerm = true;
835 }
836 if (Feature == "+f32mm") {
837 FPU |= NeonMode;
838 FPU |= SveMode;
839 HasFullFP16 = true;
840 HasMatmulFP32 = true;
841 }
842 if (Feature == "+f64mm") {
843 FPU |= NeonMode;
844 FPU |= SveMode;
845 HasFullFP16 = true;
846 HasMatmulFP64 = true;
847 }
848 if (Feature == "+sme") {
849 HasSME = true;
850 HasBFloat16 = true;
851 HasFullFP16 = true;
852 }
853 if (Feature == "+sme2") {
854 HasSME = true;
855 HasSME2 = true;
856 HasBFloat16 = true;
857 HasFullFP16 = true;
858 }
859 if (Feature == "+sme-f64f64") {
860 HasSME = true;
861 HasSMEF64F64 = true;
862 HasBFloat16 = true;
863 HasFullFP16 = true;
864 }
865 if (Feature == "+sme-i16i64") {
866 HasSME = true;
867 HasSMEI16I64 = true;
868 HasBFloat16 = true;
869 HasFullFP16 = true;
870 }
871 if (Feature == "+sme-fa64") {
872 FPU |= NeonMode;
873 FPU |= SveMode;
874 HasSME = true;
875 HasSVE2 = true;
876 HasSMEFA64 = true;
877 }
878 if (Feature == "+sb")
879 HasSB = true;
880 if (Feature == "+predres")
881 HasPredRes = true;
882 if (Feature == "+ssbs")
883 HasSSBS = true;
884 if (Feature == "+bti")
885 HasBTI = true;
886 if (Feature == "+wfxt")
887 HasWFxT = true;
888 if (Feature == "-fmv")
889 HasFMV = false;
890 if (Feature == "+crc")
891 HasCRC = true;
892 if (Feature == "+rcpc")
893 HasRCPC = true;
894 if (Feature == "+aes") {
895 FPU |= NeonMode;
896 HasAES = true;
897 }
898 if (Feature == "+sha2") {
899 FPU |= NeonMode;
900 HasSHA2 = true;
901 }
902 if (Feature == "+sha3") {
903 FPU |= NeonMode;
904 HasSHA2 = true;
905 HasSHA3 = true;
906 }
907 if (Feature == "+rdm") {
908 FPU |= NeonMode;
909 HasRDM = true;
910 }
911 if (Feature == "+dit")
912 HasDIT = true;
913 if (Feature == "+cccp")
914 HasCCPP = true;
915 if (Feature == "+ccdp") {
916 HasCCPP = true;
917 HasCCDP = true;
918 }
919 if (Feature == "+fptoint")
920 HasFRInt3264 = true;
921 if (Feature == "+sm4") {
922 FPU |= NeonMode;
923 HasSM4 = true;
924 }
925 if (Feature == "+strict-align")
926 HasUnalignedAccess = false;
927
928 // All predecessor archs are added but select the latest one for ArchKind.
929 if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
930 ArchInfo = &llvm::AArch64::ARMV8A;
931 if (Feature == "+v8.1a" &&
932 ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
933 ArchInfo = &llvm::AArch64::ARMV8_1A;
934 if (Feature == "+v8.2a" &&
935 ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
936 ArchInfo = &llvm::AArch64::ARMV8_2A;
937 if (Feature == "+v8.3a" &&
938 ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
939 ArchInfo = &llvm::AArch64::ARMV8_3A;
940 if (Feature == "+v8.4a" &&
941 ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
942 ArchInfo = &llvm::AArch64::ARMV8_4A;
943 if (Feature == "+v8.5a" &&
944 ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
945 ArchInfo = &llvm::AArch64::ARMV8_5A;
946 if (Feature == "+v8.6a" &&
947 ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
948 ArchInfo = &llvm::AArch64::ARMV8_6A;
949 if (Feature == "+v8.7a" &&
950 ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
951 ArchInfo = &llvm::AArch64::ARMV8_7A;
952 if (Feature == "+v8.8a" &&
953 ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
954 ArchInfo = &llvm::AArch64::ARMV8_8A;
955 if (Feature == "+v8.9a" &&
956 ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
957 ArchInfo = &llvm::AArch64::ARMV8_9A;
958 if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
959 ArchInfo = &llvm::AArch64::ARMV9A;
960 if (Feature == "+v9.1a" &&
961 ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
962 ArchInfo = &llvm::AArch64::ARMV9_1A;
963 if (Feature == "+v9.2a" &&
964 ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
965 ArchInfo = &llvm::AArch64::ARMV9_2A;
966 if (Feature == "+v9.3a" &&
967 ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
968 ArchInfo = &llvm::AArch64::ARMV9_3A;
969 if (Feature == "+v9.4a" &&
970 ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
971 ArchInfo = &llvm::AArch64::ARMV9_4A;
972 if (Feature == "+v9.5a" &&
973 ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
974 ArchInfo = &llvm::AArch64::ARMV9_5A;
975 if (Feature == "+v8r")
976 ArchInfo = &llvm::AArch64::ARMV8R;
977 if (Feature == "+fullfp16") {
978 FPU |= NeonMode;
979 HasFullFP16 = true;
980 }
981 if (Feature == "+dotprod") {
982 FPU |= NeonMode;
983 HasDotProd = true;
984 }
985 if (Feature == "+fp16fml") {
986 FPU |= NeonMode;
987 HasFullFP16 = true;
988 HasFP16FML = true;
989 }
990 if (Feature == "+mte")
991 HasMTE = true;
992 if (Feature == "+tme")
993 HasTME = true;
994 if (Feature == "+pauth")
995 HasPAuth = true;
996 if (Feature == "+i8mm")
997 HasMatMul = true;
998 if (Feature == "+bf16")
999 HasBFloat16 = true;
1000 if (Feature == "+lse")
1001 HasLSE = true;
1002 if (Feature == "+ls64")
1003 HasLS64 = true;
1004 if (Feature == "+rand")
1005 HasRandGen = true;
1006 if (Feature == "+flagm")
1007 HasFlagM = true;
1008 if (Feature == "+altnzcv") {
1009 HasFlagM = true;
1010 HasAlternativeNZCV = true;
1011 }
1012 if (Feature == "+mops")
1013 HasMOPS = true;
1014 if (Feature == "+d128")
1015 HasD128 = true;
1016 if (Feature == "+gcs")
1017 HasGCS = true;
1018 if (Feature == "+rcpc3")
1019 HasRCPC3 = true;
1020 if (Feature == "+pauth-lr") {
1021 HasPAuthLR = true;
1022 HasPAuth = true;
1023 }
1024 }
1025
1026 // Check features that are manually disabled by command line options.
1027 // This needs to be checked after architecture-related features are handled,
1028 // making sure they are properly disabled when required.
1029 for (const auto &Feature : Features) {
1030 if (Feature == "-d128")
1031 HasD128 = false;
1032 }
1033
1034 setDataLayout();
1035 setArchFeatures();
1036
1037 if (HasNoFP) {
1038 FPU &= ~FPUMode;
1039 FPU &= ~NeonMode;
1040 FPU &= ~SveMode;
1041 }
1042 if (HasNoNeon) {
1043 FPU &= ~NeonMode;
1044 FPU &= ~SveMode;
1045 }
1046 if (HasNoSVE)
1047 FPU &= ~SveMode;
1048
1049 return true;
1050}
1051
// Build the final feature map for this target: seed it with the features
// implied by the requested CPU, then layer the explicitly requested
// features (and their dependencies) on top before delegating to the
// generic TargetInfo implementation.
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(Name: CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Extensions: Exts, Features&: CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(x: F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Name: Feature.substr(pos: 1))) {
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Name: Feature.substr(pos: 1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(A&: AttrFeatures, Separator: ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(x: F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      // Rewrite a '+<extension>' spelling to its canonical backend feature
      // name where one exists, so later handling sees a single spelling.
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
            llvm::AArch64::parseArchExtension(Extension: Feature.substr(pos: 1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(x: UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec: UpdatedFeaturesVec);
}
1096
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(A&: AttrFeatures, Separator: ",");
  bool FoundArch = false;

  // Helper: split a "+feat1+feat2..." tail and append each piece to
  // Features, translated to the backend feature name where recognised.
  // Unrecognised names are kept (as "+name"/"-name") so Sema can diagnose
  // them later.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(A&: SplitFeatures, Separator: StringRef("+"), MaxSplit: -1, KeepEmpty: false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(ArchExt: Feature);
      if (!FeatureName.empty())
        Features.push_back(x: FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with(Prefix: "no"))
          Features.push_back(x: "-" + Feature.drop_front(N: 2).str());
        else
          Features.push_back(x: "+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on this target.
    if (Feature.starts_with(Prefix: "fpmath="))
      continue;

    if (Feature.starts_with(Prefix: "branch-protection=")) {
      Ret.BranchProtection = Feature.split(Separator: '=').second.trim();
      continue;
    }

    if (Feature.starts_with(Prefix: "arch=")) {
      // A second "arch=" is recorded as a duplicate for later diagnosis.
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split(Separator: "=").second.trim().split(Separator: "+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Arch: Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(x: AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with(Prefix: "cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split(Separator: "=").second.trim().split(Separator: "+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with(Prefix: "tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split(Separator: "=").second.trim();
    } else if (Feature.starts_with(Prefix: "+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with(Prefix: "no-")) {
      // "no-foo": emit the backend's "-foo" form where recognised.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(ArchExt: Feature.split(Separator: "-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back(x: "-" + FeatureName.drop_front(N: 1).str());
      else
        Ret.Features.push_back(x: "-" + Feature.split(Separator: "-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(ArchExt: Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(x: FeatureName.str());
      else
        Ret.Features.push_back(x: "+" + Feature.str());
    }
  }
  return Ret;
}
1192
// The __bf16 storage type is always available on AArch64.
bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}
1196
1197TargetInfo::CallingConvCheckResult
1198AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1199 switch (CC) {
1200 case CC_C:
1201 case CC_Swift:
1202 case CC_SwiftAsync:
1203 case CC_PreserveMost:
1204 case CC_PreserveAll:
1205 case CC_OpenCLKernel:
1206 case CC_AArch64VectorCall:
1207 case CC_AArch64SVEPCS:
1208 case CC_Win64:
1209 return CCCR_OK;
1210 default:
1211 return CCCR_Warning;
1212 }
1213}
1214
// Returning false means __builtin_clz(0) has a defined result on this target.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1216
// va_list uses the AAPCS64 structure form on generic AArch64.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1220
// Register names accepted in GCC-style inline assembly and register
// variables; the index into this table is the target register number.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};
1267
1268ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1269 return llvm::ArrayRef(GCCRegNames);
1270}
1271
// Alternative register spellings accepted in GCC-style inline assembly,
// each mapped to the canonical name from GCCRegNames above.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {.Aliases: {"w31"}, .Register: "wsp"},
    {.Aliases: {"x31"}, .Register: "sp"},
    // GCC rN registers are aliases of xN registers.
    {.Aliases: {"r0"}, .Register: "x0"},
    {.Aliases: {"r1"}, .Register: "x1"},
    {.Aliases: {"r2"}, .Register: "x2"},
    {.Aliases: {"r3"}, .Register: "x3"},
    {.Aliases: {"r4"}, .Register: "x4"},
    {.Aliases: {"r5"}, .Register: "x5"},
    {.Aliases: {"r6"}, .Register: "x6"},
    {.Aliases: {"r7"}, .Register: "x7"},
    {.Aliases: {"r8"}, .Register: "x8"},
    {.Aliases: {"r9"}, .Register: "x9"},
    {.Aliases: {"r10"}, .Register: "x10"},
    {.Aliases: {"r11"}, .Register: "x11"},
    {.Aliases: {"r12"}, .Register: "x12"},
    {.Aliases: {"r13"}, .Register: "x13"},
    {.Aliases: {"r14"}, .Register: "x14"},
    {.Aliases: {"r15"}, .Register: "x15"},
    {.Aliases: {"r16"}, .Register: "x16"},
    {.Aliases: {"r17"}, .Register: "x17"},
    {.Aliases: {"r18"}, .Register: "x18"},
    {.Aliases: {"r19"}, .Register: "x19"},
    {.Aliases: {"r20"}, .Register: "x20"},
    {.Aliases: {"r21"}, .Register: "x21"},
    {.Aliases: {"r22"}, .Register: "x22"},
    {.Aliases: {"r23"}, .Register: "x23"},
    {.Aliases: {"r24"}, .Register: "x24"},
    {.Aliases: {"r25"}, .Register: "x25"},
    {.Aliases: {"r26"}, .Register: "x26"},
    {.Aliases: {"r27"}, .Register: "x27"},
    {.Aliases: {"r28"}, .Register: "x28"},
    {.Aliases: {"r29", "x29"}, .Register: "fp"},
    {.Aliases: {"r30", "x30"}, .Register: "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1310
1311ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1312 return llvm::ArrayRef(GCCRegAliases);
1313}
1314
// Returns the length of a flag-output ("@cc<cond>") constraint, or 0 if
// Name is not one. Every supported constraint is "@cc" followed by one of
// the sixteen two-letter AArch64 condition codes, so a match is always
// exactly five characters.
static unsigned matchAsmCCConstraint(const char *Name) {
  constexpr unsigned len = 5;
  // Require the "@cc" prefix and an exact length of five characters.
  if (Name[0] != '@' || Name[1] != 'c' || Name[2] != 'c' || Name[3] == '\0' ||
      Name[4] == '\0' || Name[5] != '\0')
    return 0;
  static constexpr const char *Conds[] = {"eq", "ne", "hs", "cs", "cc", "lo",
                                          "mi", "pl", "vs", "vc", "hi", "ls",
                                          "ge", "lt", "gt", "le"};
  for (const char *Cond : Conds)
    if (Name[3] == Cond[0] && Name[4] == Cond[1])
      return len;
  return 0;
}
1338
// Convert an AArch64-specific inline-asm constraint into the generic form
// understood by the rest of the compiler. On return, Constraint points at
// the last character consumed (the caller advances past it).
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2; // Skip the two extra characters of "U??".
    break;
  case '@':
    // Flag-output constraint "@cc<cond>" becomes "{@cc<cond>}".
    if (const unsigned Len = matchAsmCCConstraint(Name: Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1360
// Validate an AArch64 inline-asm constraint and record whether it names a
// register or a memory operand. For multi-character constraints ("Up?",
// "Uc?", "@cc??") Name is advanced to the last consumed character.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
  }
  return false;
}
1425
1426bool AArch64TargetInfo::validateConstraintModifier(
1427 StringRef Constraint, char Modifier, unsigned Size,
1428 std::string &SuggestedModifier) const {
1429 // Strip off constraint modifiers.
1430 Constraint = Constraint.ltrim(Chars: "=+&");
1431
1432 switch (Constraint[0]) {
1433 default:
1434 return true;
1435 case 'z':
1436 case 'r': {
1437 switch (Modifier) {
1438 case 'x':
1439 case 'w':
1440 // For now assume that the person knows what they're
1441 // doing with the modifier.
1442 return true;
1443 default:
1444 // By default an 'r' constraint will be in the 'x'
1445 // registers.
1446 if (Size == 64)
1447 return true;
1448
1449 if (Size == 512)
1450 return HasLS64;
1451
1452 SuggestedModifier = "w";
1453 return false;
1454 }
1455 }
1456 }
1457}
1458
// No registers are implicitly clobbered by inline asm on AArch64.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1460
1461int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1462 if (RegNo == 0)
1463 return 0;
1464 if (RegNo == 1)
1465 return 1;
1466 return -1;
1467}
1468
1469bool AArch64TargetInfo::validatePointerAuthKey(
1470 const llvm::APSInt &value) const {
1471 return 0 <= value && value <= 3;
1472}
1473
// __int128 is always available on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
1475
// Little-endian AArch64: all configuration is inherited from the generic
// AArch64 target; only the data layout (below) differs.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1479
1480void AArch64leTargetInfo::setDataLayout() {
1481 if (getTriple().isOSBinFormatMachO()) {
1482 if(getTriple().isArch32Bit())
1483 resetDataLayout(DL: "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", UserLabelPrefix: "_");
1484 else
1485 resetDataLayout(DL: "e-m:o-i64:64-i128:128-n32:64-S128", UserLabelPrefix: "_");
1486 } else
1487 resetDataLayout(DL: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1488}
1489
// Define the little-endian marker, then the common AArch64 macro set.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1495
// Big-endian AArch64: all configuration is inherited from the generic
// AArch64 target; only the data layout and macros (below) differ.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1499
// Define the big-endian markers (modern and legacy spellings), then the
// common AArch64 macro set.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__AARCH64EB__");
  Builder.defineMacro(Name: "__AARCH_BIG_ENDIAN");
  Builder.defineMacro(Name: "__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1507
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian AArch64 is never Mach-O.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout(DL: "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1512
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // Pointer-sized integer types are 64-bit even though 'long' is 32-bit.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1530
1531void WindowsARM64TargetInfo::setDataLayout() {
1532 resetDataLayout(DL: Triple.isOSBinFormatMachO()
1533 ? "e-m:o-i64:64-i128:128-n32:64-S128"
1534 : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1535 UserLabelPrefix: Triple.isOSBinFormatMachO() ? "_" : "");
1536}
1537
// Windows on ARM64 uses a plain char* va_list rather than the AAPCS64
// structure form.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1542
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86VectorCall:
    // __vectorcall is only accepted when emulating x64 (Arm64EC).
    if (getTriple().isWindowsArm64EC())
      return CCCR_OK;
    return CCCR_Ignore;
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
    // x86-only conventions are silently ignored on ARM64.
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
1566
// MSVC-compatible environments use the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1572
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC defines the x64 compatibility macros plus its own marker,
    // and notably does NOT define _M_ARM64.
    Builder.defineMacro(Name: "_M_X64", Value: "100");
    Builder.defineMacro(Name: "_M_AMD64", Value: "100");
    Builder.defineMacro(Name: "_M_ARM64EC", Value: "1");
  } else {
    Builder.defineMacro(Name: "_M_ARM64", Value: "1");
  }
}
1584
// The MSVC ARM64 convention is used regardless of the Clang ABI
// compatibility setting.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1589
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
                                                     bool HasNonWeakDef) const {
  unsigned Align =
      WindowsARM64TargetInfo::getMinGlobalAlign(Size: TypeSize, HasNonWeakDef);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(a: Align, b: 128u); // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(a: Align, b: 64u);  // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(a: Align, b: 32u);  // align type at least 4 bytes
  }
  return Align;
}
1608
// MinGW uses the Itanium-style generic AArch64 C++ ABI rather than the
// Microsoft C++ ABI.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1614
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is 64-bit (IEEE double format) on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // 32-bit arm64 variants keep 32-bit bitfield layout rules and use the
    // WatchOS C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1638
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__AARCH64_SIMD__");
  // 32-bit arm64 variants advertise a distinct architecture macro.
  if (Triple.isArch32Bit())
    Builder.defineMacro(Name: "__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro(Name: "__ARM64_ARCH_8__");
  Builder.defineMacro(Name: "__ARM_NEON__");
  Builder.defineMacro(Name: "__REGISTER_PREFIX__", Value: "");
  Builder.defineMacro(Name: "__arm64", Value: "1");
  Builder.defineMacro(Name: "__arm64__", Value: "1");

  if (Triple.isArm64e())
    Builder.defineMacro(Name: "__arm64e__", Value: "1");

  // Add the common Darwin platform and availability macros.
  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1657
// Darwin AArch64 uses a plain char* va_list rather than the AAPCS64
// structure form.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1662
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  // Reuse the little-endian AArch64 target with the arch component rewritten
  // to "aarch64", keeping the original vendor/OS/environment.
  IsRenderScriptTarget = true;
}
1672
// RenderScript marker plus the standard little-endian AArch64 macros.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1678

// Source: clang/lib/Basic/Targets/AArch64.cpp