1//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements AArch64 TargetInfo objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64.h"
14#include "clang/Basic/Diagnostic.h"
15#include "clang/Basic/LangOptions.h"
16#include "clang/Basic/TargetBuiltins.h"
17#include "clang/Basic/TargetInfo.h"
18#include "llvm/ADT/APSInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/StringSwitch.h"
21#include "llvm/TargetParser/AArch64TargetParser.h"
22#include "llvm/TargetParser/ARMTargetParserCommon.h"
23#include <optional>
24
25using namespace clang;
26using namespace clang::targets;
27
28static constexpr int NumNeonBuiltins =
29 NEON::FirstFp16Builtin - Builtin::FirstTSBuiltin;
30static constexpr int NumFp16Builtins =
31 NEON::FirstTSBuiltin - NEON::FirstFp16Builtin;
32static constexpr int NumSVEBuiltins =
33 SVE::FirstNeonBridgeBuiltin - NEON::FirstTSBuiltin;
34static constexpr int NumSVENeonBridgeBuiltins =
35 SVE::FirstTSBuiltin - SVE::FirstNeonBridgeBuiltin;
36static constexpr int NumSMEBuiltins = SME::FirstTSBuiltin - SVE::FirstTSBuiltin;
37static constexpr int NumAArch64Builtins =
38 AArch64::LastTSBuiltin - SME::FirstTSBuiltin;
39static constexpr int NumBuiltins =
40 AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin;
41static_assert(NumBuiltins ==
42 (NumNeonBuiltins + NumFp16Builtins + NumSVEBuiltins +
43 NumSVENeonBridgeBuiltins + NumSMEBuiltins + NumAArch64Builtins));
44
45namespace clang {
46namespace NEON {
47#define GET_NEON_BUILTIN_STR_TABLE
48#include "clang/Basic/arm_neon.inc"
49#undef GET_NEON_BUILTIN_STR_TABLE
50
51static constexpr std::array<Builtin::Info, NumNeonBuiltins> BuiltinInfos = {
52#define GET_NEON_BUILTIN_INFOS
53#include "clang/Basic/arm_neon.inc"
54#undef GET_NEON_BUILTIN_INFOS
55};
56
57namespace FP16 {
58#define GET_NEON_BUILTIN_STR_TABLE
59#include "clang/Basic/arm_fp16.inc"
60#undef GET_NEON_BUILTIN_STR_TABLE
61
62static constexpr std::array<Builtin::Info, NumFp16Builtins> BuiltinInfos = {
63#define GET_NEON_BUILTIN_INFOS
64#include "clang/Basic/arm_fp16.inc"
65#undef GET_NEON_BUILTIN_INFOS
66};
67} // namespace FP16
68} // namespace NEON
69
70namespace SVE {
71#define GET_SVE_BUILTIN_STR_TABLE
72#include "clang/Basic/arm_sve_builtins.inc"
73#undef GET_SVE_BUILTIN_STR_TABLE
74
75static constexpr std::array<Builtin::Info, NumSVEBuiltins> BuiltinInfos = {
76#define GET_SVE_BUILTIN_INFOS
77#include "clang/Basic/arm_sve_builtins.inc"
78#undef GET_SVE_BUILTIN_INFOS
79};
80} // namespace SVE
81
82namespace SME {
83#define GET_SME_BUILTIN_STR_TABLE
84#include "clang/Basic/arm_sme_builtins.inc"
85#undef GET_SME_BUILTIN_STR_TABLE
86
87static constexpr std::array<Builtin::Info, NumSMEBuiltins> BuiltinInfos = {
88#define GET_SME_BUILTIN_INFOS
89#include "clang/Basic/arm_sme_builtins.inc"
90#undef GET_SME_BUILTIN_INFOS
91};
92} // namespace SME
93} // namespace clang
94
95static constexpr llvm::StringTable BuiltinSVENeonBridgeStrings =
96 CLANG_BUILTIN_STR_TABLE_START
97#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_STR_TABLE
98#define GET_SVE_BUILTINS
99#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
100#undef GET_SVE_BUILTINS
101#undef TARGET_BUILTIN
102 ;
103static constexpr llvm::StringTable BuiltinAArch64Strings =
104 CLANG_BUILTIN_STR_TABLE_START
105#define BUILTIN CLANG_BUILTIN_STR_TABLE
106#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_STR_TABLE
107#define TARGET_HEADER_BUILTIN CLANG_TARGET_HEADER_BUILTIN_STR_TABLE
108#include "clang/Basic/BuiltinsAArch64.def"
109 ;
110
111static constexpr auto BuiltinSVENeonBridgeInfos =
112 Builtin::MakeInfos<NumSVENeonBridgeBuiltins>(Infos: {
113#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_ENTRY
114#define GET_SVE_BUILTINS
115#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
116#undef GET_SVE_BUILTINS
117#undef TARGET_BUILTIN
118 });
119static constexpr auto BuiltinAArch64Infos =
120 Builtin::MakeInfos<NumAArch64Builtins>(Infos: {
121#define BUILTIN CLANG_BUILTIN_ENTRY
122#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_ENTRY
123#define LANGBUILTIN CLANG_LANGBUILTIN_ENTRY
124#define TARGET_HEADER_BUILTIN CLANG_TARGET_HEADER_BUILTIN_ENTRY
125#include "clang/Basic/BuiltinsAArch64.def"
126 });
127
128void AArch64TargetInfo::setArchFeatures() {
129 if (*ArchInfo == llvm::AArch64::ARMV8R) {
130 HasDotProd = true;
131 HasDIT = true;
132 HasFlagM = true;
133 HasRCPC = true;
134 FPU |= NeonMode;
135 HasCCPP = true;
136 HasCRC = true;
137 HasLSE = true;
138 HasRDM = true;
139 } else if (ArchInfo->Version.getMajor() == 8) {
140 if (ArchInfo->Version.getMinor() >= 7u) {
141 HasWFxT = true;
142 }
143 if (ArchInfo->Version.getMinor() >= 6u) {
144 HasBFloat16 = true;
145 HasMatMul = true;
146 }
147 if (ArchInfo->Version.getMinor() >= 5u) {
148 HasAlternativeNZCV = true;
149 HasFRInt3264 = true;
150 HasSSBS = true;
151 HasSB = true;
152 HasPredRes = true;
153 HasBTI = true;
154 }
155 if (ArchInfo->Version.getMinor() >= 4u) {
156 HasDotProd = true;
157 HasDIT = true;
158 HasFlagM = true;
159 }
160 if (ArchInfo->Version.getMinor() >= 3u) {
161 HasRCPC = true;
162 FPU |= NeonMode;
163 }
164 if (ArchInfo->Version.getMinor() >= 2u) {
165 HasCCPP = true;
166 }
167 if (ArchInfo->Version.getMinor() >= 1u) {
168 HasCRC = true;
169 HasLSE = true;
170 HasRDM = true;
171 }
172 } else if (ArchInfo->Version.getMajor() == 9) {
173 if (ArchInfo->Version.getMinor() >= 2u) {
174 HasWFxT = true;
175 }
176 if (ArchInfo->Version.getMinor() >= 1u) {
177 HasBFloat16 = true;
178 HasMatMul = true;
179 }
180 FPU |= SveMode;
181 HasSVE2 = true;
182 HasFullFP16 = true;
183 HasAlternativeNZCV = true;
184 HasFRInt3264 = true;
185 HasSSBS = true;
186 HasSB = true;
187 HasPredRes = true;
188 HasBTI = true;
189 HasDotProd = true;
190 HasDIT = true;
191 HasFlagM = true;
192 HasRCPC = true;
193 FPU |= NeonMode;
194 HasCCPP = true;
195 HasCRC = true;
196 HasLSE = true;
197 HasRDM = true;
198 }
199}
200
201AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
202 const TargetOptions &Opts)
203 : TargetInfo(Triple), ABI("aapcs") {
204 if (getTriple().isOSOpenBSD()) {
205 Int64Type = SignedLongLong;
206 IntMaxType = SignedLongLong;
207 } else {
208 if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
209 WCharType = UnsignedInt;
210
211 Int64Type = SignedLong;
212 IntMaxType = SignedLong;
213 }
214
215 AddrSpaceMap = &ARM64AddrSpaceMap;
216
217 // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
218 HasLegalHalfType = true;
219 HalfArgsAndReturns = true;
220 HasFloat16 = true;
221 HasStrictFP = true;
222
223 if (Triple.isArch64Bit())
224 LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
225 else
226 LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
227
228 BitIntMaxAlign = 128;
229 MaxVectorAlign = 128;
230 MaxAtomicInlineWidth = 128;
231 MaxAtomicPromoteWidth = 128;
232
233 LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
234 LongDoubleFormat = &llvm::APFloat::IEEEquad();
235
236 BFloat16Width = BFloat16Align = 16;
237 BFloat16Format = &llvm::APFloat::BFloat();
238
239 // Make __builtin_ms_va_list available.
240 HasBuiltinMSVaList = true;
241
242 // Make the Neon ACLE and SVE types available. Note that this deliberately
243 // doesn't depend on SveMode, since in principle it should be possible to turn
244 // SVE on and off within a translation unit. It should also be possible
245 // to compile the global declaration:
246 //
247 // __SVInt8_t *ptr;
248 //
249 // even without SVE.
250 HasAArch64ACLETypes = true;
251
252 // {} in inline assembly are neon specifiers, not assembly variant
253 // specifiers.
254 NoAsmVariants = true;
255
256 // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
257 // contributes to the alignment of the containing aggregate in the same way
258 // a plain (non bit-field) member of that type would, without exception for
259 // zero-sized or anonymous bit-fields."
260 assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
261 UseZeroLengthBitfieldAlignment = true;
262
263 // AAPCS64 allows any "fundamental integer data type" to be used for
264 // over-sized bitfields, which includes 128-bit integers.
265 LargestOverSizedBitfieldContainer = 128;
266
267 HasUnalignedAccess = true;
268
269 // AArch64 targets default to using the ARM C++ ABI.
270 TheCXXABI.set(TargetCXXABI::GenericAArch64);
271
272 if (Triple.getOS() == llvm::Triple::Linux)
273 this->MCountName = "\01_mcount";
274 else if (Triple.getOS() == llvm::Triple::UnknownOS)
275 this->MCountName =
276 Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
277}
278
279StringRef AArch64TargetInfo::getABI() const { return ABI; }
280
281bool AArch64TargetInfo::setABI(const std::string &Name) {
282 if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
283 Name != "pauthtest")
284 return false;
285
286 ABI = Name;
287 return true;
288}
289
290bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
291 if (hasFeature(Feature: "fp") && ABI == "aapcs-soft") {
292 // aapcs-soft is not allowed for targets with an FPU, to avoid there being
293 // two incomatible ABIs.
294 Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
295 return false;
296 }
297 if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
298 getTriple().getOS() != llvm::Triple::Linux) {
299 Diags.Report(diag::err_target_unsupported_abi_for_triple)
300 << getTriple().getEnvironmentName() << getTriple().getTriple();
301 return false;
302 }
303 return true;
304}
305
306bool AArch64TargetInfo::validateGlobalRegisterVariable(
307 StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
308 if (RegName == "sp") {
309 HasSizeMismatch = RegSize != 64;
310 return true;
311 }
312 if (RegName.starts_with(Prefix: "w"))
313 HasSizeMismatch = RegSize != 32;
314 else if (RegName.starts_with(Prefix: "x"))
315 HasSizeMismatch = RegSize != 64;
316 else
317 return false;
318 StringRef RegNum = RegName.drop_front();
319 // Check if the register is reserved. See also
320 // AArch64TargetLowering::getRegisterByName().
321 return RegNum == "0" ||
322 (RegNum == "18" &&
323 llvm::AArch64::isX18ReservedByDefault(TT: getTriple())) ||
324 getTargetOpts().FeatureMap.lookup(Key: ("reserve-x" + RegNum).str());
325}
326
327bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
328 BranchProtectionInfo &BPI,
329 const LangOptions &LO,
330 StringRef &Err) const {
331 llvm::ARM::ParsedBranchProtection PBP;
332 if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, EnablePAuthLR: HasPAuthLR))
333 return false;
334
335 // GCS is currently untested with ptrauth-returns, but enabling this could be
336 // allowed in future after testing with a suitable system.
337 if (LO.PointerAuthReturns &&
338 (PBP.Scope != "none" || PBP.BranchProtectionPAuthLR ||
339 PBP.GuardedControlStack))
340 return false;
341
342 BPI.SignReturnAddr =
343 llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
344 .Case(S: "non-leaf", Value: LangOptions::SignReturnAddressScopeKind::NonLeaf)
345 .Case(S: "all", Value: LangOptions::SignReturnAddressScopeKind::All)
346 .Default(Value: LangOptions::SignReturnAddressScopeKind::None);
347
348 if (PBP.Key == "a_key")
349 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
350 else
351 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
352
353 BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
354 BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
355 BPI.GuardedControlStack = PBP.GuardedControlStack;
356 return true;
357}
358
359bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
360 return llvm::AArch64::parseCpu(Name).has_value();
361}
362
363bool AArch64TargetInfo::setCPU(const std::string &Name) {
364 return isValidCPUName(Name);
365}
366
367void AArch64TargetInfo::fillValidCPUList(
368 SmallVectorImpl<StringRef> &Values) const {
369 llvm::AArch64::fillValidCPUArchList(Values);
370}
371
372void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
373 MacroBuilder &Builder) const {
374 Builder.defineMacro(Name: "__ARM_FEATURE_QRDMX", Value: "1");
375}
376
377void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
378 MacroBuilder &Builder) const {
379 // Also include the ARMv8.1 defines
380 getTargetDefinesARMV81A(Opts, Builder);
381}
382
383void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
384 MacroBuilder &Builder) const {
385 Builder.defineMacro(Name: "__ARM_FEATURE_COMPLEX", Value: "1");
386 Builder.defineMacro(Name: "__ARM_FEATURE_JCVT", Value: "1");
387 // Also include the Armv8.2 defines
388 getTargetDefinesARMV82A(Opts, Builder);
389}
390
391void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
392 MacroBuilder &Builder) const {
393 // Also include the Armv8.3 defines
394 getTargetDefinesARMV83A(Opts, Builder);
395}
396
397void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
398 MacroBuilder &Builder) const {
399 Builder.defineMacro(Name: "__ARM_FEATURE_FRINT", Value: "1");
400 // Also include the Armv8.4 defines
401 getTargetDefinesARMV84A(Opts, Builder);
402}
403
404void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
405 MacroBuilder &Builder) const {
406 // Also include the Armv8.5 defines
407 // FIXME: Armv8.6 makes the following extensions mandatory:
408 // - __ARM_FEATURE_BF16
409 // - __ARM_FEATURE_MATMUL_INT8
410 // Handle them here.
411 getTargetDefinesARMV85A(Opts, Builder);
412}
413
414void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
415 MacroBuilder &Builder) const {
416 // Also include the Armv8.6 defines
417 getTargetDefinesARMV86A(Opts, Builder);
418}
419
420void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
421 MacroBuilder &Builder) const {
422 // Also include the Armv8.7 defines
423 getTargetDefinesARMV87A(Opts, Builder);
424}
425
426void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
427 MacroBuilder &Builder) const {
428 // Also include the Armv8.8 defines
429 getTargetDefinesARMV88A(Opts, Builder);
430}
431
432void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
433 MacroBuilder &Builder) const {
434 // Armv9-A maps to Armv8.5-A
435 getTargetDefinesARMV85A(Opts, Builder);
436}
437
438void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
439 MacroBuilder &Builder) const {
440 // Armv9.1-A maps to Armv8.6-A
441 getTargetDefinesARMV86A(Opts, Builder);
442}
443
444void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
445 MacroBuilder &Builder) const {
446 // Armv9.2-A maps to Armv8.7-A
447 getTargetDefinesARMV87A(Opts, Builder);
448}
449
450void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
451 MacroBuilder &Builder) const {
452 // Armv9.3-A maps to Armv8.8-A
453 getTargetDefinesARMV88A(Opts, Builder);
454}
455
456void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
457 MacroBuilder &Builder) const {
458 // Armv9.4-A maps to Armv8.9-A
459 getTargetDefinesARMV89A(Opts, Builder);
460}
461
462void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
463 MacroBuilder &Builder) const {
464 // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
465 getTargetDefinesARMV94A(Opts, Builder);
466}
467
468void AArch64TargetInfo::getTargetDefinesARMV96A(const LangOptions &Opts,
469 MacroBuilder &Builder) const {
470 // Armv9.6-A does not have a v8.* equivalent, but is a superset of v9.5-A.
471 getTargetDefinesARMV95A(Opts, Builder);
472}
473
474void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
475 MacroBuilder &Builder) const {
476 // Target identification.
477 if (getTriple().isWindowsArm64EC()) {
478 // Define the same set of macros as would be defined on x86_64 to ensure that
479 // ARM64EC datatype layouts match those of x86_64 compiled code
480 Builder.defineMacro(Name: "__amd64__");
481 Builder.defineMacro(Name: "__amd64");
482 Builder.defineMacro(Name: "__x86_64");
483 Builder.defineMacro(Name: "__x86_64__");
484 Builder.defineMacro(Name: "__arm64ec__");
485 } else {
486 Builder.defineMacro(Name: "__aarch64__");
487 }
488
489 // Inline assembly supports AArch64 flag outputs.
490 Builder.defineMacro(Name: "__GCC_ASM_FLAG_OUTPUTS__");
491
492 std::string CodeModel = getTargetOpts().CodeModel;
493 if (CodeModel == "default")
494 CodeModel = "small";
495 for (char &c : CodeModel)
496 c = toupper(c: c);
497 Builder.defineMacro(Name: "__AARCH64_CMODEL_" + CodeModel + "__");
498
499 // ACLE predefines. Many can only have one possible value on v8 AArch64.
500 Builder.defineMacro(Name: "__ARM_ACLE_VERSION(year, quarter, patch)",
501 Value: "(100 * (year) + 10 * (quarter) + (patch))");
502#define ARM_ACLE_VERSION(Y, Q, P) (100 * (Y) + 10 * (Q) + (P))
503 Builder.defineMacro(Name: "__ARM_ACLE", Value: Twine(ARM_ACLE_VERSION(2024, 2, 0)));
504 Builder.defineMacro(Name: "__FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL",
505 Value: Twine(ARM_ACLE_VERSION(2024, 3, 0)));
506#undef ARM_ACLE_VERSION
507 Builder.defineMacro(Name: "__ARM_ARCH",
508 Value: std::to_string(val: ArchInfo->Version.getMajor()));
509 Builder.defineMacro(Name: "__ARM_ARCH_PROFILE",
510 Value: std::string("'") + (char)ArchInfo->Profile + "'");
511
512 Builder.defineMacro(Name: "__ARM_64BIT_STATE", Value: "1");
513 Builder.defineMacro(Name: "__ARM_PCS_AAPCS64", Value: "1");
514 Builder.defineMacro(Name: "__ARM_ARCH_ISA_A64", Value: "1");
515
516 Builder.defineMacro(Name: "__ARM_FEATURE_CLZ", Value: "1");
517 Builder.defineMacro(Name: "__ARM_FEATURE_FMA", Value: "1");
518 Builder.defineMacro(Name: "__ARM_FEATURE_LDREX", Value: "0xF");
519 Builder.defineMacro(Name: "__ARM_FEATURE_IDIV", Value: "1"); // As specified in ACLE
520 Builder.defineMacro(Name: "__ARM_FEATURE_DIV"); // For backwards compatibility
521 Builder.defineMacro(Name: "__ARM_FEATURE_NUMERIC_MAXMIN", Value: "1");
522 Builder.defineMacro(Name: "__ARM_FEATURE_DIRECTED_ROUNDING", Value: "1");
523
524 Builder.defineMacro(Name: "__ARM_ALIGN_MAX_STACK_PWR", Value: "4");
525
526 // These macros are set when Clang can parse declarations with these
527 // attributes.
528 Builder.defineMacro(Name: "__ARM_STATE_ZA", Value: "1");
529 Builder.defineMacro(Name: "__ARM_STATE_ZT0", Value: "1");
530
531 // 0xe implies support for half, single and double precision operations.
532 if (FPU & FPUMode)
533 Builder.defineMacro(Name: "__ARM_FP", Value: "0xE");
534
535 // PCS specifies this for SysV variants, which is all we support. Other ABIs
536 // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
537 Builder.defineMacro(Name: "__ARM_FP16_FORMAT_IEEE", Value: "1");
538 Builder.defineMacro(Name: "__ARM_FP16_ARGS", Value: "1");
539
540 // Clang supports arm_neon_sve_bridge.h
541 Builder.defineMacro(Name: "__ARM_NEON_SVE_BRIDGE", Value: "1");
542
543 if (Opts.UnsafeFPMath)
544 Builder.defineMacro(Name: "__ARM_FP_FAST", Value: "1");
545
546 Builder.defineMacro(Name: "__ARM_SIZEOF_WCHAR_T",
547 Value: Twine(Opts.WCharSize ? Opts.WCharSize : 4));
548
549 Builder.defineMacro(Name: "__ARM_SIZEOF_MINIMAL_ENUM", Value: Opts.ShortEnums ? "1" : "4");
550
551 if (FPU & NeonMode) {
552 Builder.defineMacro(Name: "__ARM_NEON", Value: "1");
553 // 64-bit NEON supports half, single and double precision operations.
554 Builder.defineMacro(Name: "__ARM_NEON_FP", Value: "0xE");
555 }
556
557 if (FPU & SveMode)
558 Builder.defineMacro(Name: "__ARM_FEATURE_SVE", Value: "1");
559
560 if (HasSVE2)
561 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2", Value: "1");
562
563 if (HasSVE2p1)
564 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2p1", Value: "1");
565
566 if (HasSVE2 && HasSVEAES)
567 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_AES", Value: "1");
568
569 if (HasSVE2 && HasSVEBitPerm)
570 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_BITPERM", Value: "1");
571
572 if (HasSVE2 && HasSVE2SHA3)
573 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SHA3", Value: "1");
574
575 if (HasSVE2 && HasSVE2SM4)
576 Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SM4", Value: "1");
577
578 if (HasSVEB16B16)
579 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_B16B16", Value: "1");
580
581 if (HasSME) {
582 Builder.defineMacro(Name: "__ARM_FEATURE_SME");
583 Builder.defineMacro(Name: "__ARM_FEATURE_LOCALLY_STREAMING", Value: "1");
584 }
585
586 if (HasSME2)
587 Builder.defineMacro(Name: "__ARM_FEATURE_SME2", Value: "1");
588
589 if (HasSME2p1)
590 Builder.defineMacro(Name: "__ARM_FEATURE_SME2p1", Value: "1");
591
592 if (HasSMEF16F16)
593 Builder.defineMacro(Name: "__ARM_FEATURE_SME_F16F16", Value: "1");
594
595 if (HasSMEB16B16)
596 Builder.defineMacro(Name: "__ARM_FEATURE_SME_B16B16", Value: "1");
597
598 if (HasFP8)
599 Builder.defineMacro(Name: "__ARM_FEATURE_FP8", Value: "1");
600
601 if (HasFP8FMA)
602 Builder.defineMacro(Name: "__ARM_FEATURE_FP8FMA", Value: "1");
603
604 if (HasFP8DOT2)
605 Builder.defineMacro(Name: "__ARM_FEATURE_FP8DOT2", Value: "1");
606
607 if (HasFP8DOT4)
608 Builder.defineMacro(Name: "__ARM_FEATURE_FP8DOT4", Value: "1");
609
610 if (HasSSVE_FP8DOT2)
611 Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_FP8DOT2", Value: "1");
612
613 if (HasSSVE_FP8DOT4)
614 Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_FP8DOT4", Value: "1");
615
616 if (HasSSVE_FP8FMA)
617 Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_FP8FMA", Value: "1");
618
619 if (HasSME_F8F32)
620 Builder.defineMacro(Name: "__ARM_FEATURE_SME_F8F32", Value: "1");
621
622 if (HasSME_F8F16)
623 Builder.defineMacro(Name: "__ARM_FEATURE_SME_F8F16", Value: "1");
624
625 if (HasCRC)
626 Builder.defineMacro(Name: "__ARM_FEATURE_CRC32", Value: "1");
627
628 if (HasRCPC3)
629 Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "3");
630 else if (HasRCPC)
631 Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "1");
632
633 if (HasFMV)
634 Builder.defineMacro(Name: "__HAVE_FUNCTION_MULTI_VERSIONING", Value: "1");
635
636 // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
637 // macros for AES, SHA2, SHA3 and SM4
638 if (HasAES && HasSHA2)
639 Builder.defineMacro(Name: "__ARM_FEATURE_CRYPTO", Value: "1");
640
641 if (HasAES)
642 Builder.defineMacro(Name: "__ARM_FEATURE_AES", Value: "1");
643
644 if (HasSHA2)
645 Builder.defineMacro(Name: "__ARM_FEATURE_SHA2", Value: "1");
646
647 if (HasSHA3) {
648 Builder.defineMacro(Name: "__ARM_FEATURE_SHA3", Value: "1");
649 Builder.defineMacro(Name: "__ARM_FEATURE_SHA512", Value: "1");
650 }
651
652 if (HasSM4) {
653 Builder.defineMacro(Name: "__ARM_FEATURE_SM3", Value: "1");
654 Builder.defineMacro(Name: "__ARM_FEATURE_SM4", Value: "1");
655 }
656
657 if (HasPAuth)
658 Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH", Value: "1");
659
660 if (HasPAuthLR)
661 Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH_LR", Value: "1");
662
663 if (HasBTI)
664 Builder.defineMacro(Name: "__ARM_FEATURE_BTI", Value: "1");
665
666 if (HasUnalignedAccess)
667 Builder.defineMacro(Name: "__ARM_FEATURE_UNALIGNED", Value: "1");
668
669 if ((FPU & NeonMode) && HasFullFP16)
670 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", Value: "1");
671 if (HasFullFP16)
672 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", Value: "1");
673
674 if (HasDotProd)
675 Builder.defineMacro(Name: "__ARM_FEATURE_DOTPROD", Value: "1");
676
677 if (HasMTE)
678 Builder.defineMacro(Name: "__ARM_FEATURE_MEMORY_TAGGING", Value: "1");
679
680 if (HasTME)
681 Builder.defineMacro(Name: "__ARM_FEATURE_TME", Value: "1");
682
683 if (HasMatMul)
684 Builder.defineMacro(Name: "__ARM_FEATURE_MATMUL_INT8", Value: "1");
685
686 if (HasLSE)
687 Builder.defineMacro(Name: "__ARM_FEATURE_ATOMICS", Value: "1");
688
689 if (HasBFloat16) {
690 Builder.defineMacro(Name: "__ARM_FEATURE_BF16", Value: "1");
691 Builder.defineMacro(Name: "__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", Value: "1");
692 Builder.defineMacro(Name: "__ARM_BF16_FORMAT_ALTERNATIVE", Value: "1");
693 Builder.defineMacro(Name: "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", Value: "1");
694 }
695
696 if ((FPU & SveMode) && HasBFloat16) {
697 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BF16", Value: "1");
698 }
699
700 if ((FPU & SveMode) && HasMatmulFP64)
701 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP64", Value: "1");
702
703 if ((FPU & SveMode) && HasMatmulFP32)
704 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP32", Value: "1");
705
706 if ((FPU & SveMode) && HasMatMul)
707 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_INT8", Value: "1");
708
709 if ((FPU & NeonMode) && HasFP16FML)
710 Builder.defineMacro(Name: "__ARM_FEATURE_FP16_FML", Value: "1");
711
712 if (Opts.hasSignReturnAddress()) {
713 // Bitmask:
714 // 0: Protection using the A key
715 // 1: Protection using the B key
716 // 2: Protection including leaf functions
717 // 3: Protection using PC as a diversifier
718 unsigned Value = 0;
719
720 if (Opts.isSignReturnAddressWithAKey())
721 Value |= (1 << 0);
722 else
723 Value |= (1 << 1);
724
725 if (Opts.isSignReturnAddressScopeAll())
726 Value |= (1 << 2);
727
728 if (Opts.BranchProtectionPAuthLR)
729 Value |= (1 << 3);
730
731 Builder.defineMacro(Name: "__ARM_FEATURE_PAC_DEFAULT", Value: std::to_string(val: Value));
732 }
733
734 if (Opts.BranchTargetEnforcement)
735 Builder.defineMacro(Name: "__ARM_FEATURE_BTI_DEFAULT", Value: "1");
736
737 if (Opts.GuardedControlStack)
738 Builder.defineMacro(Name: "__ARM_FEATURE_GCS_DEFAULT", Value: "1");
739
740 if (HasLS64)
741 Builder.defineMacro(Name: "__ARM_FEATURE_LS64", Value: "1");
742
743 if (HasRandGen)
744 Builder.defineMacro(Name: "__ARM_FEATURE_RNG", Value: "1");
745
746 if (HasMOPS)
747 Builder.defineMacro(Name: "__ARM_FEATURE_MOPS", Value: "1");
748
749 if (HasD128)
750 Builder.defineMacro(Name: "__ARM_FEATURE_SYSREG128", Value: "1");
751
752 if (HasGCS)
753 Builder.defineMacro(Name: "__ARM_FEATURE_GCS", Value: "1");
754
755 if (*ArchInfo == llvm::AArch64::ARMV8_1A)
756 getTargetDefinesARMV81A(Opts, Builder);
757 else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
758 getTargetDefinesARMV82A(Opts, Builder);
759 else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
760 getTargetDefinesARMV83A(Opts, Builder);
761 else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
762 getTargetDefinesARMV84A(Opts, Builder);
763 else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
764 getTargetDefinesARMV85A(Opts, Builder);
765 else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
766 getTargetDefinesARMV86A(Opts, Builder);
767 else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
768 getTargetDefinesARMV87A(Opts, Builder);
769 else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
770 getTargetDefinesARMV88A(Opts, Builder);
771 else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
772 getTargetDefinesARMV89A(Opts, Builder);
773 else if (*ArchInfo == llvm::AArch64::ARMV9A)
774 getTargetDefinesARMV9A(Opts, Builder);
775 else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
776 getTargetDefinesARMV91A(Opts, Builder);
777 else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
778 getTargetDefinesARMV92A(Opts, Builder);
779 else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
780 getTargetDefinesARMV93A(Opts, Builder);
781 else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
782 getTargetDefinesARMV94A(Opts, Builder);
783 else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
784 getTargetDefinesARMV95A(Opts, Builder);
785 else if (*ArchInfo == llvm::AArch64::ARMV9_6A)
786 getTargetDefinesARMV96A(Opts, Builder);
787
788 // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
789 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
790 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
791 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
792 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
793 Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
794
795 // Allow detection of fast FMA support.
796 Builder.defineMacro(Name: "__FP_FAST_FMA", Value: "1");
797 Builder.defineMacro(Name: "__FP_FAST_FMAF", Value: "1");
798
799 // C/C++ operators work on both VLS and VLA SVE types
800 if (FPU & SveMode)
801 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_VECTOR_OPERATORS", Value: "2");
802
803 if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
804 Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BITS", Value: Twine(Opts.VScaleMin * 128));
805 }
806}
807
808llvm::SmallVector<Builtin::InfosShard>
809AArch64TargetInfo::getTargetBuiltins() const {
810 return {
811 {&NEON::BuiltinStrings, NEON::BuiltinInfos, "__builtin_neon_"},
812 {&NEON::FP16::BuiltinStrings, NEON::FP16::BuiltinInfos,
813 "__builtin_neon_"},
814 {&SVE::BuiltinStrings, SVE::BuiltinInfos, "__builtin_sve_"},
815 {&BuiltinSVENeonBridgeStrings, BuiltinSVENeonBridgeInfos},
816 {&SME::BuiltinStrings, SME::BuiltinInfos, "__builtin_sme_"},
817 {&BuiltinAArch64Strings, BuiltinAArch64Infos},
818 };
819}
820
821std::optional<std::pair<unsigned, unsigned>>
822AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts,
823 bool IsArmStreamingFunction,
824 llvm::StringMap<bool> *FeatureMap) const {
825 if (LangOpts.VScaleMin || LangOpts.VScaleMax)
826 return std::pair<unsigned, unsigned>(
827 LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
828
829 if (hasFeature(Feature: "sve") || (FeatureMap && (FeatureMap->lookup(Key: "sve"))))
830 return std::pair<unsigned, unsigned>(1, 16);
831
832 if (IsArmStreamingFunction &&
833 (hasFeature(Feature: "sme") || (FeatureMap && (FeatureMap->lookup(Key: "sme")))))
834 return std::pair<unsigned, unsigned>(1, 16);
835
836 return std::nullopt;
837}
838
839uint64_t AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const {
840 return llvm::AArch64::getFMVPriority(Features);
841}
842
843bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
844 // FMV extensions which imply no backend features do not affect codegen.
845 if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
846 return Ext->ID.has_value();
847 return false;
848}
849
850bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
851 // CPU features might be separated by '+', extract them and check
852 llvm::SmallVector<StringRef, 8> Features;
853 FeatureStr.split(A&: Features, Separator: "+");
854 for (auto &Feature : Features)
855 if (!llvm::AArch64::parseFMVExtension(Extension: Feature.trim()).has_value())
856 return false;
857 return true;
858}
859
860bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
861 return llvm::StringSwitch<bool>(Feature)
862 .Cases(S0: "aarch64", S1: "arm64", S2: "arm", Value: true)
863 .Case(S: "fmv", Value: HasFMV)
864 .Case(S: "fp", Value: FPU & FPUMode)
865 .Cases(S0: "neon", S1: "simd", Value: FPU & NeonMode)
866 .Case(S: "jscvt", Value: HasJSCVT)
867 .Case(S: "fcma", Value: HasFCMA)
868 .Case(S: "rng", Value: HasRandGen)
869 .Case(S: "flagm", Value: HasFlagM)
870 .Case(S: "flagm2", Value: HasAlternativeNZCV)
871 .Case(S: "fp16fml", Value: HasFP16FML)
872 .Case(S: "dotprod", Value: HasDotProd)
873 .Case(S: "sm4", Value: HasSM4)
874 .Case(S: "rdm", Value: HasRDM)
875 .Case(S: "lse", Value: HasLSE)
876 .Case(S: "crc", Value: HasCRC)
877 .Case(S: "sha2", Value: HasSHA2)
878 .Case(S: "sha3", Value: HasSHA3)
879 .Cases(S0: "aes", S1: "pmull", Value: HasAES)
880 .Cases(S0: "fp16", S1: "fullfp16", Value: HasFullFP16)
881 .Case(S: "dit", Value: HasDIT)
882 .Case(S: "dpb", Value: HasCCPP)
883 .Case(S: "dpb2", Value: HasCCDP)
884 .Case(S: "rcpc", Value: HasRCPC)
885 .Case(S: "frintts", Value: HasFRInt3264)
886 .Case(S: "i8mm", Value: HasMatMul)
887 .Case(S: "bf16", Value: HasBFloat16)
888 .Case(S: "sve", Value: FPU & SveMode)
889 .Case(S: "sve-b16b16", Value: HasSVEB16B16)
890 .Case(S: "f32mm", Value: FPU & SveMode && HasMatmulFP32)
891 .Case(S: "f64mm", Value: FPU & SveMode && HasMatmulFP64)
892 .Case(S: "sve2", Value: FPU & SveMode && HasSVE2)
893 .Case(S: "sve-aes", Value: HasSVEAES)
894 .Case(S: "sve-bitperm", Value: FPU & HasSVEBitPerm)
895 .Case(S: "sve2-sha3", Value: FPU & SveMode && HasSVE2SHA3)
896 .Case(S: "sve2-sm4", Value: FPU & SveMode && HasSVE2SM4)
897 .Case(S: "sve2p1", Value: FPU & SveMode && HasSVE2p1)
898 .Case(S: "sme", Value: HasSME)
899 .Case(S: "sme2", Value: HasSME2)
900 .Case(S: "sme2p1", Value: HasSME2p1)
901 .Case(S: "sme-f64f64", Value: HasSMEF64F64)
902 .Case(S: "sme-i16i64", Value: HasSMEI16I64)
903 .Case(S: "sme-fa64", Value: HasSMEFA64)
904 .Case(S: "sme-f16f16", Value: HasSMEF16F16)
905 .Case(S: "sme-b16b16", Value: HasSMEB16B16)
906 .Case(S: "memtag", Value: HasMTE)
907 .Case(S: "sb", Value: HasSB)
908 .Case(S: "predres", Value: HasPredRes)
909 .Cases(S0: "ssbs", S1: "ssbs2", Value: HasSSBS)
910 .Case(S: "bti", Value: HasBTI)
911 .Cases(S0: "ls64", S1: "ls64_v", S2: "ls64_accdata", Value: HasLS64)
912 .Case(S: "wfxt", Value: HasWFxT)
913 .Case(S: "rcpc3", Value: HasRCPC3)
914 .Case(S: "fp8", Value: HasFP8)
915 .Case(S: "fp8fma", Value: HasFP8FMA)
916 .Case(S: "fp8dot2", Value: HasFP8DOT2)
917 .Case(S: "fp8dot4", Value: HasFP8DOT4)
918 .Case(S: "ssve-fp8dot2", Value: HasSSVE_FP8DOT2)
919 .Case(S: "ssve-fp8dot4", Value: HasSSVE_FP8DOT4)
920 .Case(S: "ssve-fp8fma", Value: HasSSVE_FP8FMA)
921 .Case(S: "sme-f8f32", Value: HasSME_F8F32)
922 .Case(S: "sme-f8f16", Value: HasSME_F8F16)
923 .Default(Value: false);
924}
925
926void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
927 StringRef Name, bool Enabled) const {
928 Features[Name] = Enabled;
929 // If the feature is an architecture feature (like v8.2a), add all previous
930 // architecture versions and any dependant target features.
931 const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
932 llvm::AArch64::ArchInfo::findBySubArch(Name);
933
934 if (!ArchInfo)
935 return; // Not an architecture, nothing more to do.
936
937 // Disabling an architecture feature does not affect dependent features
938 if (!Enabled)
939 return;
940
941 for (const auto *OtherArch : llvm::AArch64::ArchInfos)
942 if (ArchInfo->implies(*OtherArch))
943 Features[OtherArch->getSubArch()] = true;
944
945 // Set any features implied by the architecture
946 std::vector<StringRef> CPUFeats;
947 if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
948 for (auto F : CPUFeats) {
949 assert(F[0] == '+' && "Expected + in target feature!");
950 Features[F.drop_front(N: 1)] = true;
951 }
952 }
953}
954
// Consumes the final list of backend feature strings ("+name"/"-name") and
// records the corresponding state on this TargetInfo, so later queries
// (hasFeature, getTargetDefines) see the resolved configuration.
// Always returns true; Diags is currently unused.
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  for (const auto &Feature : Features) {
    // Explicit disables are only recorded here; they are applied after the
    // loop so the outcome does not depend on their position in Features.
    if (Feature == "-fp-armv8")
      HasNoFP = true;
    if (Feature == "-neon")
      HasNoNeon = true;
    if (Feature == "-sve")
      HasNoSVE = true;

    if (Feature == "+neon" || Feature == "+fp-armv8")
      FPU |= NeonMode;
    if (Feature == "+jscvt") {
      HasJSCVT = true;
      FPU |= NeonMode;
    }
    if (Feature == "+fcma") {
      HasFCMA = true;
      FPU |= NeonMode;
    }

    // The SVE family: each extension also enables the modes/flags it
    // builds on (Neon, SVE mode, fullfp16, base SVE2 where applicable).
    if (Feature == "+sve") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2p1") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2p1 = true;
    }
    if (Feature == "+sve-aes") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasSVEAES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve-b16b16")
      HasSVEB16B16 = true;
    if (Feature == "+sve-bitperm") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasSVEBitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP64 = true;
    }
    // The SME family: each extension implies base SME plus bf16/fp16.
    if (Feature == "+sme") {
      HasSME = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme2") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme2p1") {
      HasSME = true;
      HasSME2 = true;
      HasSME2p1 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-f64f64") {
      HasSME = true;
      HasSMEF64F64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-i16i64") {
      HasSME = true;
      HasSMEI16I64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-fa64") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasSME = true;
      HasSVE2 = true;
      HasSMEFA64 = true;
    }
    if (Feature == "+sme-f16f16") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
      HasSMEF16F16 = true;
    }
    if (Feature == "+sme-b16b16") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
      HasSVEB16B16 = true;
      HasSMEB16B16 = true;
    }

    if (Feature == "+fp8")
      HasFP8 = true;
    if (Feature == "+fp8fma")
      HasFP8FMA = true;
    if (Feature == "+fp8dot2")
      HasFP8DOT2 = true;
    if (Feature == "+fp8dot4")
      HasFP8DOT4 = true;
    if (Feature == "+ssve-fp8dot2")
      HasSSVE_FP8DOT2 = true;
    if (Feature == "+ssve-fp8dot4")
      HasSSVE_FP8DOT4 = true;
    if (Feature == "+ssve-fp8fma")
      HasSSVE_FP8FMA = true;
    if (Feature == "+sme-f8f32")
      HasSME_F8F32 = true;
    if (Feature == "+sme-f8f16")
      HasSME_F8F16 = true;
    if (Feature == "+sb")
      HasSB = true;
    if (Feature == "+predres")
      HasPredRes = true;
    if (Feature == "+ssbs")
      HasSSBS = true;
    if (Feature == "+bti")
      HasBTI = true;
    if (Feature == "+wfxt")
      HasWFxT = true;
    // FMV is on by default; only an explicit "-fmv" turns it off.
    if (Feature == "-fmv")
      HasFMV = false;
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+rcpc")
      HasRCPC = true;
    if (Feature == "+aes") {
      FPU |= NeonMode;
      HasAES = true;
    }
    if (Feature == "+sha2") {
      FPU |= NeonMode;
      HasSHA2 = true;
    }
    if (Feature == "+sha3") {
      FPU |= NeonMode;
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+rdm") {
      FPU |= NeonMode;
      HasRDM = true;
    }
    if (Feature == "+dit")
      HasDIT = true;
    // NOTE(review): the backend subtarget feature for FEAT_DPB is spelled
    // "ccpp" (FeatureCCPP); "+cccp" looks like a typo that would never
    // match — confirm against llvm/lib/Target/AArch64/AArch64Features.td.
    if (Feature == "+cccp")
      HasCCPP = true;
    if (Feature == "+ccdp") {
      HasCCPP = true;
      HasCCDP = true;
    }
    if (Feature == "+fptoint")
      HasFRInt3264 = true;
    if (Feature == "+sm4") {
      FPU |= NeonMode;
      HasSM4 = true;
    }
    if (Feature == "+strict-align")
      HasUnalignedAccess = false;

    // All predecessor archs are added but select the latest one for ArchKind.
    if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8A;
    if (Feature == "+v8.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_1A;
    if (Feature == "+v8.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_2A;
    if (Feature == "+v8.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_3A;
    if (Feature == "+v8.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_4A;
    if (Feature == "+v8.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_5A;
    if (Feature == "+v8.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_6A;
    if (Feature == "+v8.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_7A;
    if (Feature == "+v8.8a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_8A;
    if (Feature == "+v8.9a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_9A;
    if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
      ArchInfo = &llvm::AArch64::ARMV9A;
    if (Feature == "+v9.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_1A;
    if (Feature == "+v9.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_2A;
    if (Feature == "+v9.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_3A;
    if (Feature == "+v9.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_4A;
    if (Feature == "+v9.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_5A;
    if (Feature == "+v9.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_6A;
    // v8-R is selected unconditionally (it has no version ordering here).
    if (Feature == "+v8r")
      ArchInfo = &llvm::AArch64::ARMV8R;
    if (Feature == "+fullfp16") {
      FPU |= NeonMode;
      HasFullFP16 = true;
    }
    if (Feature == "+dotprod") {
      FPU |= NeonMode;
      HasDotProd = true;
    }
    if (Feature == "+fp16fml") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasFP16FML = true;
    }
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+altnzcv") {
      HasFlagM = true;
      HasAlternativeNZCV = true;
    }
    if (Feature == "+mops")
      HasMOPS = true;
    if (Feature == "+d128")
      HasD128 = true;
    if (Feature == "+gcs")
      HasGCS = true;
    if (Feature == "+rcpc3")
      HasRCPC3 = true;
    if (Feature == "+pauth-lr") {
      HasPAuthLR = true;
      HasPAuth = true;
    }
  }

  // Check features that are manually disabled by command line options.
  // This needs to be checked after architecture-related features are handled,
  // making sure they are properly disabled when required.
  for (const auto &Feature : Features) {
    if (Feature == "-d128")
      HasD128 = false;
  }

  setDataLayout();
  setArchFeatures();

  // Apply the explicit disables recorded at the top of the loop, clearing
  // every mode the disabled feature subsumes (no FP implies no Neon/SVE,
  // no Neon implies no SVE).
  if (HasNoFP) {
    FPU &= ~FPUMode;
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoNeon) {
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoSVE)
    FPU &= ~SveMode;

  return true;
}
1279
1280// Parse AArch64 Target attributes, which are a comma separated list of:
1281// "arch=<arch>" - parsed to features as per -march=..
1282// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1283// "tune=<cpu>" - TuneCPU set to <cpu>
1284// "feature", "no-feature" - Add (or remove) feature.
1285// "+feature", "+nofeature" - Add (or remove) feature.
1286//
1287// A feature may correspond to an Extension (anything with a corresponding
1288// AEK_), in which case an ExtensionSet is used to parse it and expand its
1289// dependencies. If the feature does not yield a successful parse then it
1290// is passed through.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  // "default" is the multiversioning default clone: no changes requested.
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(A&: AttrFeatures, Separator: ",");
  bool FoundArch = false;

  // Helper: split a "+feat1+feat2..." suffix and apply each piece, either
  // into FeatureBits (when it is a known extension) or as a raw pass-through
  // feature string.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features,
                                llvm::AArch64::ExtensionSet &FeatureBits) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(A&: SplitFeatures, Separator: StringRef("+"), MaxSplit: -1, KeepEmpty: false);
    for (StringRef Feature : SplitFeatures) {
      if (FeatureBits.parseModifier(Modifier: Feature))
        continue;
      // Pass through anything that failed to parse so that we can emit
      // diagnostics, as well as valid internal feature names.
      //
      // FIXME: We should consider rejecting internal feature names like
      // neon, v8a, etc.
      // FIXME: We should consider emitting diagnostics here.
      if (Feature.starts_with(Prefix: "no"))
        Features.push_back(x: "-" + Feature.drop_front(N: 2).str());
      else
        Features.push_back(x: "+" + Feature.str());
    }
  };

  llvm::AArch64::ExtensionSet FeatureBits;
  // Reconstruct the bitset from the command line option features.
  FeatureBits.reconstructFromParsedFeatures(getTargetOpts().FeaturesAsWritten,
                                            Ret.Features);

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on AArch64.
    if (Feature.starts_with(Prefix: "fpmath="))
      continue;

    if (Feature.starts_with(Prefix: "branch-protection=")) {
      Ret.BranchProtection = Feature.split(Separator: '=').second.trim();
      continue;
    }

    if (Feature.starts_with(Prefix: "arch=")) {
      // Only one "arch=" is allowed; a second one is reported via Duplicate.
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split(Separator: "=").second.trim().split(Separator: "+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Arch: Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      FeatureBits.addArchDefaults(Arch: *AI);
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
    } else if (Feature.starts_with(Prefix: "cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split(Separator: "=").second.trim().split(Separator: "+");
        Ret.CPU = Split.first;
        if (auto CpuInfo = llvm::AArch64::parseCpu(Name: Ret.CPU)) {
          FeatureBits.addCPUDefaults(CPU: *CpuInfo);
          SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
        }
      }
    } else if (Feature.starts_with(Prefix: "tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split(Separator: "=").second.trim();
    } else if (Feature.starts_with(Prefix: "+")) {
      SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
    } else {
      // Bare feature name, optionally "no-" prefixed.
      if (FeatureBits.parseModifier(Modifier: Feature, /* AllowNoDashForm = */ true))
        continue;
      // Pass through anything that failed to parse so that we can emit
      // diagnostics, as well as valid internal feature names.
      //
      // FIXME: We should consider rejecting internal feature names like
      // neon, v8a, etc.
      // FIXME: We should consider emitting diagnostics here.
      if (Feature.starts_with(Prefix: "no-"))
        Ret.Features.push_back(x: "-" + Feature.drop_front(N: 3).str());
      else
        Ret.Features.push_back(x: "+" + Feature.str());
    }
  }
  // Expand the accumulated extension set into backend feature strings.
  FeatureBits.toLLVMFeatureList(Ret.Features);
  return Ret;
}
1389
1390bool AArch64TargetInfo::hasBFloat16Type() const {
1391 return true;
1392}
1393
1394TargetInfo::CallingConvCheckResult
1395AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1396 switch (CC) {
1397 case CC_C:
1398 case CC_Swift:
1399 case CC_SwiftAsync:
1400 case CC_PreserveMost:
1401 case CC_PreserveAll:
1402 case CC_PreserveNone:
1403 case CC_DeviceKernel:
1404 case CC_AArch64VectorCall:
1405 case CC_AArch64SVEPCS:
1406 case CC_Win64:
1407 return CCCR_OK;
1408 default:
1409 return CCCR_Warning;
1410 }
1411}
1412
1413bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1414
1415TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
1416 return TargetInfo::AArch64ABIBuiltinVaList;
1417}
1418
// Register names accepted in GCC-style inline assembly (asm register
// variables and constraints). Order defines the register numbering used by
// getGCCRegNames().
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};
1465
1466ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1467 return llvm::ArrayRef(GCCRegNames);
1468}
1469
// Alternate spellings accepted for registers in inline assembly, mapped to
// the canonical names in GCCRegNames above.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    // Register 31 is the stack pointer in both widths.
    {.Aliases: {"w31"}, .Register: "wsp"},
    {.Aliases: {"x31"}, .Register: "sp"},
    // GCC rN registers are aliases of xN registers.
    {.Aliases: {"r0"}, .Register: "x0"},
    {.Aliases: {"r1"}, .Register: "x1"},
    {.Aliases: {"r2"}, .Register: "x2"},
    {.Aliases: {"r3"}, .Register: "x3"},
    {.Aliases: {"r4"}, .Register: "x4"},
    {.Aliases: {"r5"}, .Register: "x5"},
    {.Aliases: {"r6"}, .Register: "x6"},
    {.Aliases: {"r7"}, .Register: "x7"},
    {.Aliases: {"r8"}, .Register: "x8"},
    {.Aliases: {"r9"}, .Register: "x9"},
    {.Aliases: {"r10"}, .Register: "x10"},
    {.Aliases: {"r11"}, .Register: "x11"},
    {.Aliases: {"r12"}, .Register: "x12"},
    {.Aliases: {"r13"}, .Register: "x13"},
    {.Aliases: {"r14"}, .Register: "x14"},
    {.Aliases: {"r15"}, .Register: "x15"},
    {.Aliases: {"r16"}, .Register: "x16"},
    {.Aliases: {"r17"}, .Register: "x17"},
    {.Aliases: {"r18"}, .Register: "x18"},
    {.Aliases: {"r19"}, .Register: "x19"},
    {.Aliases: {"r20"}, .Register: "x20"},
    {.Aliases: {"r21"}, .Register: "x21"},
    {.Aliases: {"r22"}, .Register: "x22"},
    {.Aliases: {"r23"}, .Register: "x23"},
    {.Aliases: {"r24"}, .Register: "x24"},
    {.Aliases: {"r25"}, .Register: "x25"},
    {.Aliases: {"r26"}, .Register: "x26"},
    {.Aliases: {"r27"}, .Register: "x27"},
    {.Aliases: {"r28"}, .Register: "x28"},
    // x29/x30 have canonical ABI names (frame pointer and link register).
    {.Aliases: {"r29", "x29"}, .Register: "fp"},
    {.Aliases: {"r30", "x30"}, .Register: "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1508
1509ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1510 return llvm::ArrayRef(GCCRegAliases);
1511}
1512
1513// Returns the length of cc constraint.
1514static unsigned matchAsmCCConstraint(const char *Name) {
1515 constexpr unsigned len = 5;
1516 auto RV = llvm::StringSwitch<unsigned>(Name)
1517 .Case(S: "@cceq", Value: len)
1518 .Case(S: "@ccne", Value: len)
1519 .Case(S: "@cchs", Value: len)
1520 .Case(S: "@cccs", Value: len)
1521 .Case(S: "@cccc", Value: len)
1522 .Case(S: "@cclo", Value: len)
1523 .Case(S: "@ccmi", Value: len)
1524 .Case(S: "@ccpl", Value: len)
1525 .Case(S: "@ccvs", Value: len)
1526 .Case(S: "@ccvc", Value: len)
1527 .Case(S: "@cchi", Value: len)
1528 .Case(S: "@ccls", Value: len)
1529 .Case(S: "@ccge", Value: len)
1530 .Case(S: "@cclt", Value: len)
1531 .Case(S: "@ccgt", Value: len)
1532 .Case(S: "@ccle", Value: len)
1533 .Default(Value: 0);
1534 return RV;
1535}
1536
// Rewrites a target-specific inline-asm constraint into the canonical form
// the backend expects, advancing Constraint past any extra characters it
// consumed (the caller advances past the final one).
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // Condition-code constraint: wrap "@cc<cc>" in braces for the backend.
    if (const unsigned Len = matchAsmCCConstraint(Name: Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Not a recognised "@cc" form: pass the single character through.
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1558
1559bool AArch64TargetInfo::validateAsmConstraint(
1560 const char *&Name, TargetInfo::ConstraintInfo &Info) const {
1561 switch (*Name) {
1562 default:
1563 return false;
1564 case 'w': // Floating point and SIMD registers (V0-V31)
1565 Info.setAllowsRegister();
1566 return true;
1567 case 'I': // Constant that can be used with an ADD instruction
1568 case 'J': // Constant that can be used with a SUB instruction
1569 case 'K': // Constant that can be used with a 32-bit logical instruction
1570 case 'L': // Constant that can be used with a 64-bit logical instruction
1571 case 'M': // Constant that can be used as a 32-bit MOV immediate
1572 case 'N': // Constant that can be used as a 64-bit MOV immediate
1573 case 'Y': // Floating point constant zero
1574 case 'Z': // Integer constant zero
1575 return true;
1576 case 'Q': // A memory reference with base register and no offset
1577 Info.setAllowsMemory();
1578 return true;
1579 case 'S': // A symbolic address
1580 Info.setAllowsRegister();
1581 return true;
1582 case 'U':
1583 if (Name[1] == 'p' &&
1584 (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
1585 // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
1586 Info.setAllowsRegister();
1587 Name += 2;
1588 return true;
1589 }
1590 if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
1591 // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
1592 Info.setAllowsRegister();
1593 Name += 2;
1594 return true;
1595 }
1596 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
1597 // Utf: A memory address suitable for ldp/stp in TF mode.
1598 // Usa: An absolute symbolic address.
1599 // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
1600
1601 // Better to return an error saying that it's an unrecognised constraint
1602 // even if this is a valid constraint in gcc.
1603 return false;
1604 case 'z': // Zero register, wzr or xzr
1605 Info.setAllowsRegister();
1606 return true;
1607 case 'x': // Floating point and SIMD registers (V0-V15)
1608 Info.setAllowsRegister();
1609 return true;
1610 case 'y': // SVE registers (V0-V7)
1611 Info.setAllowsRegister();
1612 return true;
1613 case '@':
1614 // CC condition
1615 if (const unsigned Len = matchAsmCCConstraint(Name)) {
1616 Name += Len - 1;
1617 Info.setAllowsRegister();
1618 return true;
1619 }
1620 }
1621 return false;
1622}
1623
1624bool AArch64TargetInfo::validateConstraintModifier(
1625 StringRef Constraint, char Modifier, unsigned Size,
1626 std::string &SuggestedModifier) const {
1627 // Strip off constraint modifiers.
1628 Constraint = Constraint.ltrim(Chars: "=+&");
1629
1630 switch (Constraint[0]) {
1631 default:
1632 return true;
1633 case 'z':
1634 case 'r': {
1635 switch (Modifier) {
1636 case 'x':
1637 case 'w':
1638 // For now assume that the person knows what they're
1639 // doing with the modifier.
1640 return true;
1641 default:
1642 // By default an 'r' constraint will be in the 'x'
1643 // registers.
1644 if (Size == 64)
1645 return true;
1646
1647 if (Size == 512)
1648 return HasLS64;
1649
1650 SuggestedModifier = "w";
1651 return false;
1652 }
1653 }
1654 }
1655}
1656
1657std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1658
1659int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1660 if (RegNo == 0)
1661 return 0;
1662 if (RegNo == 1)
1663 return 1;
1664 return -1;
1665}
1666
1667bool AArch64TargetInfo::validatePointerAuthKey(
1668 const llvm::APSInt &value) const {
1669 return 0 <= value && value <= 3;
1670}
1671
1672bool AArch64TargetInfo::hasInt128Type() const { return true; }
1673
// Little-endian AArch64: all configuration is inherited from the base
// AArch64TargetInfo.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1677
void AArch64leTargetInfo::setDataLayout() {
  // Mach-O targets use the "_" user-label prefix and a Mach-O mangling
  // scheme; arm64_32 additionally uses 32-bit pointers (p:32:32).
  if (getTriple().isOSBinFormatMachO()) {
    if(getTriple().isArch32Bit())
      resetDataLayout(DL: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-"
                      "i128:128-n32:64-S128-Fn32",
                      UserLabelPrefix: "_");
    else
      resetDataLayout(DL: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
                      "n32:64-S128-Fn32",
                      UserLabelPrefix: "_");
  } else
    // ELF and other object formats: little-endian, no label prefix.
    resetDataLayout(DL: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-"
                    "i64:64-i128:128-n32:64-S128-Fn32");
}
1692
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Mark little-endian, then add the common AArch64 defines.
  Builder.defineMacro(Name: "__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1698
// Big-endian AArch64: all configuration is inherited from the base
// AArch64TargetInfo.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1702
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Mark big-endian (including the legacy spellings), then add the common
  // AArch64 defines.
  Builder.defineMacro(Name: "__AARCH64EB__");
  Builder.defineMacro(Name: "__AARCH_BIG_ENDIAN");
  Builder.defineMacro(Name: "__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1710
void AArch64beTargetInfo::setDataLayout() {
  // Mach-O has no big-endian AArch64 variant; only ELF-style layout here.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout(DL: "E-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-"
                  "i64:64-i128:128-n32:64-S128-Fn32");
}
1716
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // With 32-bit long, the 64-bit integer typedefs must map to long long.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1734
void WindowsARM64TargetInfo::setDataLayout() {
  // Mach-O layout (with the "_" label prefix) only applies when this target
  // is built with a Mach-O object format triple; otherwise use the COFF
  // ("m:w") layout.
  resetDataLayout(DL: Triple.isOSBinFormatMachO()
                      ? "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:"
                        "128-n32:64-S128-Fn32"
                      : "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-"
                        "i64:64-i128:128-n32:64-S128-Fn32",
                  UserLabelPrefix: Triple.isOSBinFormatMachO() ? "_" : "");
}
1743
1744TargetInfo::BuiltinVaListKind
1745WindowsARM64TargetInfo::getBuiltinVaListKind() const {
1746 return TargetInfo::CharPtrBuiltinVaList;
1747}
1748
1749TargetInfo::CallingConvCheckResult
1750WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1751 switch (CC) {
1752 case CC_X86VectorCall:
1753 if (getTriple().isWindowsArm64EC())
1754 return CCCR_OK;
1755 return CCCR_Ignore;
1756 case CC_X86StdCall:
1757 case CC_X86ThisCall:
1758 case CC_X86FastCall:
1759 return CCCR_Ignore;
1760 case CC_C:
1761 case CC_DeviceKernel:
1762 case CC_PreserveMost:
1763 case CC_PreserveAll:
1764 case CC_PreserveNone:
1765 case CC_Swift:
1766 case CC_SwiftAsync:
1767 case CC_Win64:
1768 return CCCR_OK;
1769 default:
1770 return CCCR_Warning;
1771 }
1772}
1773
// MSVC-compatible Windows/ARM64 target: uses the Microsoft C++ ABI on top
// of the common Windows ARM64 configuration.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1779
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC emulates the x64 ABI, so the MSVC x64 macros are defined in
    // addition to _M_ARM64EC.
    Builder.defineMacro(Name: "_M_X64", Value: "100");
    Builder.defineMacro(Name: "_M_AMD64", Value: "100");
    Builder.defineMacro(Name: "_M_ARM64EC", Value: "1");
  } else {
    Builder.defineMacro(Name: "_M_ARM64", Value: "1");
  }
}
1791
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // Always the Microsoft Win64 convention; -fclang-abi-compat is ignored.
  return CCK_MicrosoftWin64;
}
1796
1797unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
1798 bool HasNonWeakDef) const {
1799 unsigned Align =
1800 WindowsARM64TargetInfo::getMinGlobalAlign(Size: TypeSize, HasNonWeakDef);
1801
1802 // MSVC does size based alignment for arm64 based on alignment section in
1803 // below document, replicate that to keep alignment consistent with object
1804 // files compiled by MSVC.
1805 // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1806 if (TypeSize >= 512) { // TypeSize >= 64 bytes
1807 Align = std::max(a: Align, b: 128u); // align type at least 16 bytes
1808 } else if (TypeSize >= 64) { // TypeSize >= 8 bytes
1809 Align = std::max(a: Align, b: 64u); // align type at least 8 butes
1810 } else if (TypeSize >= 16) { // TypeSize >= 2 bytes
1811 Align = std::max(a: Align, b: 32u); // align type at least 4 bytes
1812 }
1813 return Align;
1814}
1815
// MinGW Windows/ARM64 target: same layout as MSVC ARM64 but with the
// Itanium-family (generic AArch64) C++ ABI.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1821
// Bare Apple Mach-O AArch64 target (non-Darwin): configuration comes from
// the AppleMachOTargetInfo mixin over the little-endian base.
AppleMachOAArch64TargetInfo::AppleMachOAArch64TargetInfo(
    const llvm::Triple &Triple, const TargetOptions &Opts)
    : AppleMachOTargetInfo<AArch64leTargetInfo>(Triple, Opts) {}
1825
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // On arm64_32 (32-bit watchOS), intmax_t is long long rather than long.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 keeps the 32-bit watchOS bitfield layout rules and C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1849
// Defines the Apple-specific AArch64 macros shared by the Mach-O and Darwin
// targets (__arm64__, __ARM_NEON__, the __ARM64_ARCH_* markers, ...).
void clang::targets::getAppleMachOAArch64Defines(MacroBuilder &Builder,
                                                 const LangOptions &Opts,
                                                 const llvm::Triple &Triple) {
  Builder.defineMacro(Name: "__AARCH64_SIMD__");
  // arm64_32 gets its own architecture marker.
  if (Triple.isArch32Bit())
    Builder.defineMacro(Name: "__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro(Name: "__ARM64_ARCH_8__");
  Builder.defineMacro(Name: "__ARM_NEON__");
  Builder.defineMacro(Name: "__REGISTER_PREFIX__", Value: "");
  Builder.defineMacro(Name: "__arm64", Value: "1");
  Builder.defineMacro(Name: "__arm64__", Value: "1");

  // arm64e (pointer-authenticated) slices advertise themselves.
  if (Triple.isArm64e())
    Builder.defineMacro(Name: "__arm64e__", Value: "1");
}
1866
void AppleMachOAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                               const llvm::Triple &Triple,
                                               MacroBuilder &Builder) const {
  // Apple AArch64 macros first, then the generic Mach-O OS defines.
  getAppleMachOAArch64Defines(Builder, Opts, Triple);
  AppleMachOTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple,
                                                          Builder);
}
1874
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  // Apple AArch64 macros first, then the Darwin OS defines.
  getAppleMachOAArch64Defines(Builder, Opts, Triple);
  DarwinTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple, Builder);
}
1881
1882TargetInfo::BuiltinVaListKind
1883DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
1884 return TargetInfo::CharPtrBuiltinVaList;
1885}
1886

Provided by KDAB

Privacy Policy
Learn to use CMake with our Intro Training
Find out more

source code of clang/lib/Basic/Targets/AArch64.cpp