1//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements semantic analysis functions specific to ARM.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/Sema/SemaARM.h"
14#include "clang/Basic/DiagnosticSema.h"
15#include "clang/Basic/TargetBuiltins.h"
16#include "clang/Basic/TargetInfo.h"
17#include "clang/Sema/Initialization.h"
18#include "clang/Sema/ParsedAttr.h"
19#include "clang/Sema/Sema.h"
20
21namespace clang {
22
23SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
24
/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
/// (MTE) builtins: __builtin_arm_{irg,addg,gmi,ldg,stg,subp}.
///
/// These need checks the generic builtin machinery cannot express:
/// pointer/integer operand kinds, immediate ranges, and call result types
/// derived from the pointer operand.
/// \returns true if an error was diagnosed, false otherwise.
bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  ASTContext &Context = getASTContext();

  // irg(ptr, mask): first operand must be a pointer, second an integer;
  // the result has the type of the pointer operand.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
      return true;
    Expr *Arg0 = TheCall->getArg(Arg: 0);
    Expr *Arg1 = TheCall->getArg(Arg: 1);

    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    // Store the converted pointer expression back into the call.
    TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());

    ExprResult SecArg = SemaRef.DefaultLvalueConversion(E: Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // addg(ptr, imm): pointer plus a small constant tag offset; result has the
  // pointer operand's type.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
      return true;

    Expr *Arg0 = TheCall->getArg(Arg: 0);
    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be a constant in range [0,15].
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
  }

  // gmi(ptr, mask): pointer and integer mask; always yields an int result.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
      return true;
    Expr *Arg0 = TheCall->getArg(Arg: 0);
    Expr *Arg1 = TheCall->getArg(Arg: 1);

    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // ldg/stg(ptr): single pointer operand; only ldg produces a value, typed
  // like its pointer argument.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 1))
      return true;
    Expr *Arg0 = TheCall->getArg(Arg: 0);
    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // subp(a, b): pointer difference. Each operand may be a pointer or a null
  // pointer constant; pointees (when both are pointers) must be compatible,
  // and the result is a 64-bit signed integer.
  // NOTE(review): unlike the other MTE builtins there is no explicit
  // checkArgCount here — presumably the declared prototype fixes the arity;
  // confirm before relying on it.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(Arg: 0);
    Expr *ArgB = TheCall->getArg(Arg: 1);

    ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgA);
    ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&](Expr *E) -> bool {
      return E->isNullPointerConstant(Ctx&: Context,
                                      NPC: Expr::NPC_ValueDependentIsNotNull);
    };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible (ignoring qualifiers), mirroring
    // ordinary pointer subtraction.
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(),
              T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) {
        return Diag(Loc: TheCall->getBeginLoc(),
                    DiagID: diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA =
          SemaRef.ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB =
          SemaRef.ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer);

    TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get());
    TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}
185
/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
///
/// The string may either be a colon-separated numeric register designation
/// with exactly \p ExpectedFieldNum fields (ARM fields are additionally
/// prefixed, e.g. "cp<n>"/"p<n>" and "c<n>"), or — when \p AllowName is
/// true — a single register name, which cannot be validated here. For
/// AArch64 writes to named PSTATE fields, the written immediate is also
/// range-checked.
/// \returns true if an error was diagnosed.
bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                   int ArgNum, unsigned ExpectedFieldNum,
                                   bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(Arg: ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Val: Arg->IgnoreParenImpCasts()))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(A&: Fields, Separator: ":");

  // Either the exact expected number of colon-separated fields, or (when
  // allowed) a single field holding a register name.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // ARM (AArch32) designations carry letter prefixes: the first field is
      // "cp<n>" or "p<n>" (coprocessor), and the CRn/CRm fields are "c<n>".
      // Strip the prefixes so the numeric checks below can run.
      ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp") ||
                     Fields[0].starts_with_insensitive(Prefix: "p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            N: Fields[0].starts_with_insensitive(Prefix: "cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive(Prefix: "c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(N: 1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive(Prefix: "c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(N: 1);
      }
    }

    // Per-field bit widths: each numeric field must fit in this many bits.
    // The leading field is narrower on AArch64 (op0 is 2 bits) than on ARM.
    SmallVector<int, 5> FieldBitWidths;
    if (FiveFields)
      FieldBitWidths.append(IL: {IsAArch64Builtin ? 2 : 4, 3, 4, 4, 3});
    else
      FieldBitWidths.append(IL: {4, 3, 4});

    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      // Field must parse as a non-negative decimal integer in range.
      ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
      ValidString &= (IntField >= 0 && IntField < (1 << FieldBitWidths[i]));
    }

    if (!ValidString)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
                        .CaseLower(S: "spsel", Value: 15)
                        .CaseLower(S: "daifclr", Value: 15)
                        .CaseLower(S: "daifset", Value: 15)
                        .CaseLower(S: "pan", Value: 15)
                        .CaseLower(S: "uao", Value: 15)
                        .CaseLower(S: "dit", Value: 15)
                        .CaseLower(S: "ssbs", Value: 15)
                        .CaseLower(S: "tco", Value: 15)
                        .CaseLower(S: "allint", Value: 1)
                        .CaseLower(S: "pm", Value: 1)
                        .Default(Value: std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
  }

  return false;
}
317
318/// getNeonEltType - Return the QualType corresponding to the elements of
319/// the vector type specified by the NeonTypeFlags. This is used to check
320/// the pointer arguments for Neon load/store intrinsics.
321static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
322 bool IsPolyUnsigned, bool IsInt64Long) {
323 switch (Flags.getEltType()) {
324 case NeonTypeFlags::Int8:
325 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
326 case NeonTypeFlags::Int16:
327 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
328 case NeonTypeFlags::Int32:
329 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
330 case NeonTypeFlags::Int64:
331 if (IsInt64Long)
332 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
333 else
334 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
335 : Context.LongLongTy;
336 case NeonTypeFlags::Poly8:
337 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
338 case NeonTypeFlags::Poly16:
339 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
340 case NeonTypeFlags::Poly64:
341 if (IsInt64Long)
342 return Context.UnsignedLongTy;
343 else
344 return Context.UnsignedLongLongTy;
345 case NeonTypeFlags::Poly128:
346 break;
347 case NeonTypeFlags::Float16:
348 return Context.HalfTy;
349 case NeonTypeFlags::Float32:
350 return Context.FloatTy;
351 case NeonTypeFlags::Float64:
352 return Context.DoubleTy;
353 case NeonTypeFlags::BFloat16:
354 return Context.BFloat16Ty;
355 case NeonTypeFlags::MFloat8:
356 return Context.MFloat8Ty;
357 }
358 llvm_unreachable("Invalid NeonTypeFlag!");
359}
360
/// Bitmask describing how an SME builtin uses the architectural SME state:
/// two bits per piece of state, encoding in (0b01), out (0b10), or
/// in/out (0b11) usage.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  // ZA array usage (bits 0-1).
  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  // ZT0 lookup-table usage (bits 2-3).
  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};
374
375bool SemaARM::CheckImmediateArg(CallExpr *TheCall, unsigned CheckTy,
376 unsigned ArgIdx, unsigned EltBitWidth,
377 unsigned ContainerBitWidth) {
378 // Function that checks whether the operand (ArgIdx) is an immediate
379 // that is one of a given set of values.
380 auto CheckImmediateInSet = [&](std::initializer_list<int64_t> Set,
381 int ErrDiag) -> bool {
382 // We can't check the value of a dependent argument.
383 Expr *Arg = TheCall->getArg(Arg: ArgIdx);
384 if (Arg->isTypeDependent() || Arg->isValueDependent())
385 return false;
386
387 // Check constant-ness first.
388 llvm::APSInt Imm;
389 if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ArgIdx, Result&: Imm))
390 return true;
391
392 if (!llvm::is_contained(Set, Element: Imm.getSExtValue()))
393 return Diag(Loc: TheCall->getBeginLoc(), DiagID: ErrDiag) << Arg->getSourceRange();
394 return false;
395 };
396
397 switch ((ImmCheckType)CheckTy) {
398 case ImmCheckType::ImmCheck0_31:
399 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 31))
400 return true;
401 break;
402 case ImmCheckType::ImmCheck0_13:
403 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 13))
404 return true;
405 break;
406 case ImmCheckType::ImmCheck0_63:
407 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 63))
408 return true;
409 break;
410 case ImmCheckType::ImmCheck1_16:
411 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 16))
412 return true;
413 break;
414 case ImmCheckType::ImmCheck0_7:
415 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 7))
416 return true;
417 break;
418 case ImmCheckType::ImmCheck1_1:
419 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 1))
420 return true;
421 break;
422 case ImmCheckType::ImmCheck1_3:
423 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 3))
424 return true;
425 break;
426 case ImmCheckType::ImmCheck1_7:
427 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 7))
428 return true;
429 break;
430 case ImmCheckType::ImmCheckExtract:
431 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
432 High: (2048 / EltBitWidth) - 1))
433 return true;
434 break;
435 case ImmCheckType::ImmCheckCvt:
436 case ImmCheckType::ImmCheckShiftRight:
437 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth))
438 return true;
439 break;
440 case ImmCheckType::ImmCheckShiftRightNarrow:
441 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth / 2))
442 return true;
443 break;
444 case ImmCheckType::ImmCheckShiftLeft:
445 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: EltBitWidth - 1))
446 return true;
447 break;
448 case ImmCheckType::ImmCheckLaneIndex:
449 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
450 High: (ContainerBitWidth / EltBitWidth) - 1))
451 return true;
452 break;
453 case ImmCheckType::ImmCheckLaneIndexCompRotate:
454 if (SemaRef.BuiltinConstantArgRange(
455 TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (2 * EltBitWidth)) - 1))
456 return true;
457 break;
458 case ImmCheckType::ImmCheckLaneIndexDot:
459 if (SemaRef.BuiltinConstantArgRange(
460 TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (4 * EltBitWidth)) - 1))
461 return true;
462 break;
463 case ImmCheckType::ImmCheckComplexRot90_270:
464 if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd))
465 return true;
466 break;
467 case ImmCheckType::ImmCheckComplexRotAll90:
468 if (CheckImmediateInSet({0, 90, 180, 270},
469 diag::err_rotation_argument_to_cmla))
470 return true;
471 break;
472 case ImmCheckType::ImmCheck0_1:
473 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 1))
474 return true;
475 break;
476 case ImmCheckType::ImmCheck0_2:
477 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 2))
478 return true;
479 break;
480 case ImmCheckType::ImmCheck0_3:
481 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 3))
482 return true;
483 break;
484 case ImmCheckType::ImmCheck0_0:
485 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 0))
486 return true;
487 break;
488 case ImmCheckType::ImmCheck0_15:
489 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 15))
490 return true;
491 break;
492 case ImmCheckType::ImmCheck0_255:
493 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 255))
494 return true;
495 break;
496 case ImmCheckType::ImmCheck1_32:
497 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 32))
498 return true;
499 break;
500 case ImmCheckType::ImmCheck1_64:
501 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 64))
502 return true;
503 break;
504 case ImmCheckType::ImmCheck2_4_Mul2:
505 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 2, High: 4) ||
506 SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum: ArgIdx, Multiple: 2))
507 return true;
508 break;
509 }
510 return false;
511}
512
513bool SemaARM::PerformNeonImmChecks(
514 CallExpr *TheCall,
515 SmallVectorImpl<std::tuple<int, int, int, int>> &ImmChecks,
516 int OverloadType) {
517 bool HasError = false;
518
519 for (const auto &I : ImmChecks) {
520 auto [ArgIdx, CheckTy, ElementBitWidth, VecBitWidth] = I;
521
522 if (OverloadType >= 0)
523 ElementBitWidth = NeonTypeFlags(OverloadType).getEltSizeInBits();
524
525 HasError |= CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth,
526 ContainerBitWidth: VecBitWidth);
527 }
528
529 return HasError;
530}
531
532bool SemaARM::PerformSVEImmChecks(
533 CallExpr *TheCall, SmallVectorImpl<std::tuple<int, int, int>> &ImmChecks) {
534 bool HasError = false;
535
536 for (const auto &I : ImmChecks) {
537 auto [ArgIdx, CheckTy, ElementBitWidth] = I;
538 HasError |=
539 CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth, ContainerBitWidth: 128);
540 }
541
542 return HasError;
543}
544
545SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
546 if (FD->hasAttr<ArmLocallyStreamingAttr>())
547 return SemaARM::ArmStreaming;
548 if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
549 if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
550 if (FPT->getAArch64SMEAttributes() &
551 FunctionType::SME_PStateSMEnabledMask)
552 return SemaARM::ArmStreaming;
553 if (FPT->getAArch64SMEAttributes() &
554 FunctionType::SME_PStateSMCompatibleMask)
555 return SemaARM::ArmStreamingCompatible;
556 }
557 }
558 return SemaARM::ArmNonStreaming;
559}
560
/// Diagnose a builtin call whose streaming-mode requirement conflicts with
/// the calling function's streaming mode.
///
/// \p BuiltinType is the mode the builtin requires (or VerifyRuntimeMode for
/// builtins usable from SVE and/or SME depending on available features).
/// \returns true if an incompatibility diagnostic was emitted.
static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
                                     const FunctionDecl *FD,
                                     SemaARM::ArmStreamingType BuiltinType,
                                     unsigned BuiltinID) {
  SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);

  // Check if the intrinsic is available in the right mode, i.e.
  // * When compiling for SME only, the caller must be in streaming mode.
  // * When compiling for SVE only, the caller must be in non-streaming mode.
  // * When compiling for both SVE and SME, the caller can be in either mode.
  if (BuiltinType == SemaARM::VerifyRuntimeMode) {
    llvm::StringMap<bool> CallerFeatures;
    S.Context.getFunctionFeatureMap(FeatureMap&: CallerFeatures, FD);

    // Avoid emitting diagnostics for a function that can never compile.
    if (FnType == SemaARM::ArmStreaming && !CallerFeatures["sme"])
      return false;

    // Find the '|' separating the two halves of the builtin's required
    // feature string, skipping any '|' nested inside parentheses.
    const auto FindTopLevelPipe = [](const char *S) {
      unsigned Depth = 0;
      unsigned I = 0, E = strlen(s: S);
      for (; I < E; ++I) {
        if (S[I] == '|' && Depth == 0)
          break;
        if (S[I] == '(')
          ++Depth;
        else if (S[I] == ')')
          --Depth;
      }
      return I;
    };

    // Dual-mode builtins declare their features as 'SVE-EXPR|SME-EXPR':
    // the left half guards non-streaming use, the right half streaming use.
    const char *RequiredFeatures =
        S.Context.BuiltinInfo.getRequiredFeatures(ID: BuiltinID);
    unsigned PipeIdx = FindTopLevelPipe(RequiredFeatures);
    assert(PipeIdx != 0 && PipeIdx != strlen(RequiredFeatures) &&
           "Expected feature string of the form 'SVE-EXPR|SME-EXPR'");
    StringRef NonStreamingBuiltinGuard = StringRef(RequiredFeatures, PipeIdx);
    StringRef StreamingBuiltinGuard = StringRef(RequiredFeatures + PipeIdx + 1);

    bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: NonStreamingBuiltinGuard, TargetFetureMap: CallerFeatures);
    bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: StreamingBuiltinGuard, TargetFetureMap: CallerFeatures);

    // Usable in both modes (or in non-streaming from a streaming-compatible
    // caller): nothing to diagnose. Otherwise narrow the requirement to the
    // single mode the caller's features allow.
    if ((SatisfiesSVE && SatisfiesSME) ||
        (SatisfiesSVE && FnType == SemaARM::ArmStreamingCompatible))
      return false;
    else if (SatisfiesSVE)
      BuiltinType = SemaARM::ArmNonStreaming;
    else if (SatisfiesSME)
      BuiltinType = SemaARM::ArmStreaming;
    else
      // This should be diagnosed by CodeGen
      return false;
  }

  // A non-streaming-only builtin from a (possibly-)streaming caller, or a
  // streaming-only builtin from a caller not known to be streaming, is an
  // error.
  if (FnType != SemaARM::ArmNonStreaming &&
      BuiltinType == SemaARM::ArmNonStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "non-streaming";
  else if (FnType != SemaARM::ArmStreaming &&
           BuiltinType == SemaARM::ArmStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming";
  else
    return false;

  return true;
}
631
/// Look up how \p BuiltinID uses the SME ZA/ZT0 state via the
/// TableGen-generated mapping; builtins absent from the table use no state.
static ArmSMEState getSMEState(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    return ArmNoState;
#define GET_SME_BUILTIN_GET_STATE
#include "clang/Basic/arm_sme_builtins_za_state.inc"
#undef GET_SME_BUILTIN_GET_STATE
  }
}
641
/// Perform semantic checking of an SME builtin call: verify the caller's
/// streaming mode, warn when the builtin needs ZA/ZT0 state the caller does
/// not declare, and range-check immediate operands using the
/// TableGen-generated tables.
/// \returns true if a (non-warning) error was diagnosed.
bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated switch fills in BuiltinType for builtins with a
    // streaming-mode requirement; others leave it unset.
    switch (BuiltinID) {
#define GET_SME_STREAMING_ATTRS
#include "clang/Basic/arm_sme_streaming_attrs.inc"
#undef GET_SME_STREAMING_ATTRS
    }

    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;

    // Using ZA/ZT0 from a function without the corresponding state is only a
    // warning, not an error.
    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_za_builtin_no_za_state)
          << TheCall->getSourceRange();

    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
          << TheCall->getSourceRange();
  }

  // Range check SME intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
682
/// Perform semantic checking of an SVE builtin call: verify the caller's
/// streaming mode and range-check immediate operands using the
/// TableGen-generated tables.
/// \returns true if an error was diagnosed.
bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated switch fills in BuiltinType for builtins with a
    // streaming-mode requirement; others leave it unset.
    switch (BuiltinID) {
#define GET_SVE_STREAMING_ATTRS
#include "clang/Basic/arm_sve_streaming_attrs.inc"
#undef GET_SVE_STREAMING_ATTRS
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }
  // Range check SVE intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
711
/// Perform semantic checking of a NEON builtin call: verify the caller's
/// streaming mode, validate the type-code immediate of overloaded
/// intrinsics, type-check pointer operands of load/store intrinsics, and
/// range-check instruction immediates, all driven by TableGen-generated
/// tables.
/// \returns true if an error was diagnosed.
bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated switch fills in BuiltinType for builtins with a
    // streaming-mode restriction; others leave it unset.
    switch (BuiltinID) {
    default:
      break;
#define GET_NEON_STREAMING_COMPAT_FLAG
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_STREAMING_COMPAT_FLAG
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }

  // The generated overload-check switch fills in:
  //   mask       - bitmask of NeonTypeFlags codes valid for this builtin
  //   PtrArgNum  - index of a pointer argument to type-check, if any
  //   HasConstPtr - whether that pointer's element type must be const
  llvm::APSInt Result;
  uint64_t mask = 0;
  int TV = -1;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit. It is always the
  // last argument.
  unsigned ImmArg = TheCall->getNumArgs() - 1;
  if (mask) {
    if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ImmArg, Result))
      return true;

    // The type code must name one of the element types this builtin accepts.
    TV = Result.getLimitedValue(Limit: 64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_invalid_neon_type_code)
             << TheCall->getArg(Arg: ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(Arg: PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    // Build the expected pointer type from the selected element type and
    // check the argument converts to it as if by assignment.
    QualType EltTy = getNeonEltType(Flags: NeonTypeFlags(TV), Context&: getASTContext(),
                                    IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = getASTContext().getPointerType(T: EltTy);
    AssignConvertType ConvTy;
    ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSType: LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (SemaRef.DiagnoseAssignmentResult(ConvTy, Loc: Arg->getBeginLoc(), DstType: LHSTy,
                                         SrcType: RHSTy, SrcExpr: RHS.get(),
                                         Action: AssignmentAction::Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  SmallVector<std::tuple<int, int, int, int>, 2> ImmChecks;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return PerformNeonImmChecks(TheCall, ImmChecks, OverloadType: TV);
}
798
/// Perform semantic checking of an MVE builtin call. The TableGen-generated
/// include supplies one case per builtin that needs checking; any other
/// builtin is accepted as-is.
/// \returns true if an error was diagnosed.
bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}
807
/// Perform semantic checking of a CDE builtin call: run the
/// TableGen-generated per-builtin checks (which set Err on failure), then
/// verify the first argument names a coprocessor configured for CDE.
/// \returns true if an error was diagnosed.
bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // All CDE builtins take the coprocessor number as their first argument.
  return CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0), /*WantCDE*/ true);
}
823
824bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
825 const Expr *CoprocArg,
826 bool WantCDE) {
827 ASTContext &Context = getASTContext();
828 if (SemaRef.isConstantEvaluatedContext())
829 return false;
830
831 // We can't check the value of a dependent argument.
832 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
833 return false;
834
835 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Ctx: Context);
836 int64_t CoprocNo = CoprocNoAP.getExtValue();
837 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
838
839 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
840 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
841
842 if (IsCDECoproc != WantCDE)
843 return Diag(Loc: CoprocArg->getBeginLoc(), DiagID: diag::err_arm_invalid_coproc)
844 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
845
846 return false;
847}
848
/// Type-check a call to one of the exclusive load/store builtins
/// (__builtin_arm_{ldrex,ldaex,strex,stlex}, shared between ARM and AArch64).
///
/// These builtins are declared with custom type checking, so this routine
/// performs all argument conversion itself: it validates the pointer operand,
/// inserts the qualification casts the implementation expects, and sets the
/// call's result type.
///
/// \param MaxWidth the widest access the target supports, in bits (64 for
///        ARM, 128 for AArch64) — used to reject over-wide pointee types.
/// Returns true on error (after emitting a diagnostic).
bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
                                           CallExpr *TheCall,
                                           unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  // Loads take one argument (the pointer); stores take two (value, pointer).
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  ASTContext &Context = getASTContext();
  // Callee reference, used as the location for all diagnostics below.
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(Arg: IsLdrex ? 0 : 1);
  ExprResult PointerArgRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(E: PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy (i.e. if it would discard
  // qualifiers the argument's pointee carries, such as const on a store).
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(other: ValType, Ctx: getASTContext())) {
    CastNeeded = CK_BitCast;
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(T: AddrType)
        << AssignmentAction::Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(T: AddrType);
  PointerArgRes = SemaRef.ImpCastExprToType(E: PointerArg, Type: AddrType, CK: CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(Arg: IsLdrex ? 0 : 1, ArgExpr: PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(T: ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // Reject ARC-managed pointees: the raw exclusives cannot honour
  // ownership-qualified loads/stores.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // A load produces the pointee's value.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(Arg: 0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, Type: ValType, /*consume*/ Consumed: false);
  ValArg = SemaRef.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
967
/// Perform target-specific semantic checking for a 32-bit ARM builtin call,
/// dispatching to the builtin-family checkers and range-checking immediate
/// operands. Returns true on error (after emitting a diagnostic).
bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Exclusive load/store builtins: 32-bit ARM supports accesses up to 64 bits.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, MaxWidth: 64);
  }

  // __builtin_arm_prefetch(addr, rw, cache): rw and cache are 0/1 immediates.
  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
  }

  // System-register accessors; the register is named by a constant string
  // whose field structure BuiltinARMSpecialReg validates.
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 3, AllowName: false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Delegate to the per-extension checkers (each is a no-op for builtins
  // outside its family).
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default:
    return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 32);
  case ARM::BI__builtin_arm_usat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
  // Barriers and dbg take a 4-bit immediate option.
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15);
  // Generic coprocessor builtins: the first operand is the coprocessor
  // number (0-15), which additionally must not be one reserved for CDE.
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15) ||
           CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0),
                                        /*WantCDE*/ false);
  }
}
1048
/// Perform target-specific semantic checking for an AArch64 builtin call,
/// dispatching to the builtin-family checkers and range-checking immediate
/// operands. Returns true on error (after emitting a diagnostic).
bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                              unsigned BuiltinID,
                                              CallExpr *TheCall) {
  // Exclusive load/store builtins: AArch64 supports up to 128-bit accesses.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, MaxWidth: 128);
  }

  // __builtin_arm_prefetch(addr, rw, target, policy, isdata).
  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 0, High: 1);
  }

  // System-register accessors; the register is named by a constant string
  // whose field structure BuiltinARMSpecialReg validates.
  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg || BuiltinID == AArch64::BI__sys)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0x7fff);

  // MSVC intrinsics taking small immediates.
  if (BuiltinID == AArch64::BI__getReg)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  if (BuiltinID == AArch64::BI__hlt)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  // Delegate to the per-extension checkers (each is a no-op for builtins
  // outside its family).
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  // Barriers take a 4-bit immediate option.
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__dsb:
  case AArch64::BI__builtin_arm_isb:
  case AArch64::BI__isb:
    l = 0;
    u = 15;
    break;
  // TCANCEL takes a 16-bit immediate reason code.
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u + l);
}
1132
namespace {
// One row of the builtin-ID -> intrinsic-name tables used to validate
// builtin aliases. FullName and ShortName are byte offsets into a companion
// string table (IntrinNames); ShortName is -1 when the intrinsic has no
// short spelling. Rows are sorted by Id so BuiltinAliasValid can binary
// search them.
struct IntrinToName {
  uint32_t Id;
  int32_t FullName;
  int32_t ShortName;
};
} // unnamed namespace
1140
1141static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
1142 ArrayRef<IntrinToName> Map,
1143 const char *IntrinNames) {
1144 AliasName.consume_front(Prefix: "__arm_");
1145 const IntrinToName *It =
1146 llvm::lower_bound(Range&: Map, Value&: BuiltinID, C: [](const IntrinToName &L, unsigned Id) {
1147 return L.Id < Id;
1148 });
1149 if (It == Map.end() || It->Id != BuiltinID)
1150 return false;
1151 StringRef FullName(&IntrinNames[It->FullName]);
1152 if (AliasName == FullName)
1153 return true;
1154 if (It->ShortName == -1)
1155 return false;
1156 StringRef ShortName(&IntrinNames[It->ShortName]);
1157 return AliasName == ShortName;
1158}
1159
/// Return true if \p AliasName is a valid alias for the MVE builtin
/// \p BuiltinID.
bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_mve_builtin_aliases.inc"
  // The included file defines:
  // - ArrayRef<IntrinToName> Map
  // - const char IntrinNames[]
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1167
/// Return true if \p AliasName is a valid alias for the CDE builtin
/// \p BuiltinID. The included file defines Map and IntrinNames, as in
/// MveAliasValid.
bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_cde_builtin_aliases.inc"
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1172
1173bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1174 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1175 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1176 return BuiltinID >= AArch64::FirstSVEBuiltin &&
1177 BuiltinID <= AArch64::LastSVEBuiltin;
1178}
1179
1180bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1181 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1182 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1183 return BuiltinID >= AArch64::FirstSMEBuiltin &&
1184 BuiltinID <= AArch64::LastSMEBuiltin;
1185}
1186
1187void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
1188 ASTContext &Context = getASTContext();
1189 if (!AL.isArgIdent(Arg: 0)) {
1190 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_argument_n_type)
1191 << AL << 1 << AANT_ArgumentIdentifier;
1192 return;
1193 }
1194
1195 IdentifierInfo *Ident = AL.getArgAsIdent(Arg: 0)->getIdentifierInfo();
1196 unsigned BuiltinID = Ident->getBuiltinID();
1197 StringRef AliasName = cast<FunctionDecl>(Val: D)->getIdentifier()->getName();
1198
1199 bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
1200 if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
1201 !SmeAliasValid(BuiltinID, AliasName)) ||
1202 (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
1203 !CdeAliasValid(BuiltinID, AliasName))) {
1204 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_arm_builtin_alias);
1205 return;
1206 }
1207
1208 D->addAttr(A: ::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
1209}
1210
1211static bool checkNewAttrMutualExclusion(
1212 Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
1213 FunctionType::ArmStateValue CurrentState, StringRef StateName) {
1214 auto CheckForIncompatibleAttr =
1215 [&](FunctionType::ArmStateValue IncompatibleState,
1216 StringRef IncompatibleStateName) {
1217 if (CurrentState == IncompatibleState) {
1218 S.Diag(Loc: AL.getLoc(), DiagID: diag::err_attributes_are_not_compatible)
1219 << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
1220 << (std::string("'") + IncompatibleStateName.str() + "(\"" +
1221 StateName.str() + "\")'")
1222 << true;
1223 AL.setInvalid();
1224 }
1225 };
1226
1227 CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
1228 CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
1229 CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
1230 CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
1231 return AL.isInvalid();
1232}
1233
1234void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
1235 if (!AL.getNumArgs()) {
1236 Diag(Loc: AL.getLoc(), DiagID: diag::err_missing_arm_state) << AL;
1237 AL.setInvalid();
1238 return;
1239 }
1240
1241 std::vector<StringRef> NewState;
1242 if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
1243 for (StringRef S : ExistingAttr->newArgs())
1244 NewState.push_back(x: S);
1245 }
1246
1247 bool HasZA = false;
1248 bool HasZT0 = false;
1249 for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
1250 StringRef StateName;
1251 SourceLocation LiteralLoc;
1252 if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: I, Str&: StateName, ArgLocation: &LiteralLoc))
1253 return;
1254
1255 if (StateName == "za")
1256 HasZA = true;
1257 else if (StateName == "zt0")
1258 HasZT0 = true;
1259 else {
1260 Diag(Loc: LiteralLoc, DiagID: diag::err_unknown_arm_state) << StateName;
1261 AL.setInvalid();
1262 return;
1263 }
1264
1265 if (!llvm::is_contained(Range&: NewState, Element: StateName)) // Avoid adding duplicates.
1266 NewState.push_back(x: StateName);
1267 }
1268
1269 if (auto *FPT = dyn_cast<FunctionProtoType>(Val: D->getFunctionType())) {
1270 FunctionType::ArmStateValue ZAState =
1271 FunctionType::getArmZAState(AttrBits: FPT->getAArch64SMEAttributes());
1272 if (HasZA && ZAState != FunctionType::ARM_None &&
1273 checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZAState, StateName: "za"))
1274 return;
1275 FunctionType::ArmStateValue ZT0State =
1276 FunctionType::getArmZT0State(AttrBits: FPT->getAArch64SMEAttributes());
1277 if (HasZT0 && ZT0State != FunctionType::ARM_None &&
1278 checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZT0State, StateName: "zt0"))
1279 return;
1280 }
1281
1282 D->dropAttr<ArmNewAttr>();
1283 D->addAttr(A: ::new (getASTContext()) ArmNewAttr(
1284 getASTContext(), AL, NewState.data(), NewState.size()));
1285}
1286
1287void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) {
1288 if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) {
1289 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_not_clinkage) << AL;
1290 return;
1291 }
1292
1293 const auto *FD = cast<FunctionDecl>(Val: D);
1294 if (!FD->isExternallyVisible()) {
1295 Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_cmse_entry_static);
1296 return;
1297 }
1298
1299 D->addAttr(A: ::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL));
1300}
1301
1302void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
1303 // Check the attribute arguments.
1304 if (AL.getNumArgs() > 1) {
1305 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_too_many_arguments) << AL << 1;
1306 return;
1307 }
1308
1309 StringRef Str;
1310 SourceLocation ArgLoc;
1311
1312 if (AL.getNumArgs() == 0)
1313 Str = "";
1314 else if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str, ArgLocation: &ArgLoc))
1315 return;
1316
1317 ARMInterruptAttr::InterruptType Kind;
1318 if (!ARMInterruptAttr::ConvertStrToInterruptType(Val: Str, Out&: Kind)) {
1319 Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_type_not_supported)
1320 << AL << Str << ArgLoc;
1321 return;
1322 }
1323
1324 if (!D->hasAttr<ARMSaveFPAttr>()) {
1325 const TargetInfo &TI = getASTContext().getTargetInfo();
1326 if (TI.hasFeature(Feature: "vfp"))
1327 Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_vfp_clobber);
1328 }
1329
1330 D->addAttr(A: ::new (getASTContext())
1331 ARMInterruptAttr(getASTContext(), AL, Kind));
1332}
1333
1334void SemaARM::handleInterruptSaveFPAttr(Decl *D, const ParsedAttr &AL) {
1335 // Go ahead and add ARMSaveFPAttr because handleInterruptAttr() checks for
1336 // it when deciding to issue a diagnostic about clobbering floating point
1337 // registers, which ARMSaveFPAttr prevents.
1338 D->addAttr(A: ::new (SemaRef.Context) ARMSaveFPAttr(SemaRef.Context, AL));
1339 SemaRef.ARM().handleInterruptAttr(D, AL);
1340
1341 // If ARM().handleInterruptAttr() failed, remove ARMSaveFPAttr.
1342 if (!D->hasAttr<ARMInterruptAttr>()) {
1343 D->dropAttr<ARMSaveFPAttr>();
1344 return;
1345 }
1346
1347 // If VFP not enabled, remove ARMSaveFPAttr but leave ARMInterruptAttr.
1348 bool VFP = SemaRef.Context.getTargetInfo().hasFeature(Feature: "vfp");
1349
1350 if (!VFP) {
1351 SemaRef.Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_save_fp_without_vfp_unit);
1352 D->dropAttr<ARMSaveFPAttr>();
1353 }
1354}
1355
1356// Check if the function definition uses any AArch64 SME features without
1357// having the '+sme' feature enabled and warn user if sme locally streaming
1358// function returns or uses arguments with VL-based types.
1359void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl *FD) {
1360 const auto *Attr = FD->getAttr<ArmNewAttr>();
1361 bool UsesSM = FD->hasAttr<ArmLocallyStreamingAttr>();
1362 bool UsesZA = Attr && Attr->isNewZA();
1363 bool UsesZT0 = Attr && Attr->isNewZT0();
1364
1365 if (UsesZA || UsesZT0) {
1366 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1367 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1368 if (EPI.AArch64SMEAttributes & FunctionType::SME_AgnosticZAStateMask)
1369 Diag(Loc: FD->getLocation(), DiagID: diag::err_sme_unsupported_agnostic_new);
1370 }
1371 }
1372
1373 if (FD->hasAttr<ArmLocallyStreamingAttr>()) {
1374 if (FD->getReturnType()->isSizelessVectorType())
1375 Diag(Loc: FD->getLocation(),
1376 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1377 << /*IsArg=*/false;
1378 if (llvm::any_of(Range: FD->parameters(), P: [](ParmVarDecl *P) {
1379 return P->getOriginalType()->isSizelessVectorType();
1380 }))
1381 Diag(Loc: FD->getLocation(),
1382 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1383 << /*IsArg=*/true;
1384 }
1385 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1386 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1387 UsesSM |= EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
1388 UsesZA |= FunctionType::getArmZAState(AttrBits: EPI.AArch64SMEAttributes) !=
1389 FunctionType::ARM_None;
1390 UsesZT0 |= FunctionType::getArmZT0State(AttrBits: EPI.AArch64SMEAttributes) !=
1391 FunctionType::ARM_None;
1392 }
1393
1394 ASTContext &Context = getASTContext();
1395 if (UsesSM || UsesZA) {
1396 llvm::StringMap<bool> FeatureMap;
1397 Context.getFunctionFeatureMap(FeatureMap, FD);
1398 if (!FeatureMap.contains(Key: "sme")) {
1399 if (UsesSM)
1400 Diag(Loc: FD->getLocation(),
1401 DiagID: diag::err_sme_definition_using_sm_in_non_sme_target);
1402 else
1403 Diag(Loc: FD->getLocation(),
1404 DiagID: diag::err_sme_definition_using_za_in_non_sme_target);
1405 }
1406 }
1407 if (UsesZT0) {
1408 llvm::StringMap<bool> FeatureMap;
1409 Context.getFunctionFeatureMap(FeatureMap, FD);
1410 if (!FeatureMap.contains(Key: "sme2")) {
1411 Diag(Loc: FD->getLocation(),
1412 DiagID: diag::err_sme_definition_using_zt0_in_non_sme2_target);
1413 }
1414 }
1415}
1416
1417/// getSVETypeSize - Return SVE vector or predicate register size.
1418static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty,
1419 bool IsStreaming) {
1420 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
1421 uint64_t VScale = IsStreaming ? Context.getLangOpts().VScaleStreamingMin
1422 : Context.getLangOpts().VScaleMin;
1423 if (Ty->getKind() == BuiltinType::SveBool ||
1424 Ty->getKind() == BuiltinType::SveCount)
1425 return (VScale * 128) / Context.getCharWidth();
1426 return VScale * 128;
1427}
1428
/// Determine whether an SVE scalable builtin type and a fixed-length vector
/// type are compatible for conversion purposes, in either direction between
/// \p FirstType and \p SecondType.
bool SemaARM::areCompatibleSveTypes(QualType FirstType, QualType SecondType) {
  bool IsStreaming = false;
  // Only consult streaming mode when the streaming and non-streaming vscale
  // ranges actually differ; otherwise the answer is mode-independent.
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;
      }

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // Check one direction: FirstType scalable builtin -> SecondType vector.
  auto IsValidCast = [&](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        ASTContext &Context = getASTContext();
        if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
          return VT->getElementType().getCanonicalType() ==
                 FirstType->getSveEltType(Ctx: Context);
        else if (VT->getVectorKind() == VectorKind::Generic)
          // GNU vectors must match both the SVE register size and the
          // builtin's element type.
          return Context.getTypeSize(T: SecondType) ==
                     getSVETypeSize(Context, Ty: BT, IsStreaming) &&
                 Context.hasSameType(
                     T1: VT->getElementType(),
                     T2: Context.getBuiltinVectorTypeInfo(VecTy: BT).ElementType);
      }
    }
    return false;
  };

  // The relation is symmetric; try both orderings.
  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}
1472
/// Determine whether an SVE scalable builtin type and a vector type are
/// convertible under the -flax-vector-conversions rules, in either direction
/// between \p FirstType and \p SecondType.
bool SemaARM::areLaxCompatibleSveTypes(QualType FirstType,
                                       QualType SecondType) {
  bool IsStreaming = false;
  // Only consult streaming mode when the streaming and non-streaming vscale
  // ranges actually differ; otherwise the answer is mode-independent.
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // Check one direction: FirstType scalable builtin -> SecondType vector.
  auto IsLaxCompatible = [&](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
                  VecTy->getVectorKind() == VectorKind::Generic)) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();
      ASTContext &Context = getASTContext();

      // Can not convert between sve predicates and sve vectors because of
      // different size.
      if (BT->getKind() == BuiltinType::SveBool &&
          VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
        return false;

      // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
      // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
      // converts to VLAT and VLAT implicitly converts to GNUT."
      // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
      // predicates.
      if (VecTy->getVectorKind() == VectorKind::Generic &&
          Context.getTypeSize(T: SecondType) !=
              getSVETypeSize(Context, Ty: BT, IsStreaming))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getSveEltType(Ctx: Context)->isIntegerType();
    }

    return false;
  };

  // The relation is symmetric; try both orderings.
  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}
1537
1538} // namespace clang
1539

// source code of clang/lib/Sema/SemaARM.cpp