//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpBuiltinBitCast.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
  switch (ID) {
  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
  case Builtin::BI__builtin_launder:
    return true;
  default:
    return false;
  }
  return false;
}

static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}

static APSInt popToAPSInt(InterpStack &Stk, PrimType T) {
  INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
}

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);
  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(const Pointer &Dest, PrimType ValueT,
                          const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}

static QualType getElemType(const Pointer &P) {
  const Descriptor *Desc = P.getFieldDesc();
  QualType T = Desc->getType();
  if (Desc->isPrimitive())
    return T;
  if (T->isPointerType())
    return T->getAs<PointerType>()->getPointeeType();
  if (Desc->isArray())
    return Desc->getElemQualType();
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return AT->getElementType();
  return T;
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  if (!S.diagnosing())
    return;

  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << S.getASTContext().BuiltinInfo.getQuotedName(ID);
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
    if (Caller && isStdCall(Frame->getCallee())) {
      const Expr *E = Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      S.report(Call->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << Call->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}
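
// Note (illustrative): in a manifestly constant-evaluated context such as
//   constexpr bool B = __builtin_is_constant_evaluated();
// the call is tautologically true, which is what the Depth == 0 check above
// diagnoses.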

// __builtin_assume(int)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
  return true;
}

static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  uint64_t Limit = ~static_cast<uint64_t>(0);
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
      ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
    Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
                .getZExtValue();

  const Pointer &B = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
      ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Limit == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
                ID == Builtin::BI__builtin_wcscmp ||
                ID == Builtin::BI__builtin_wcsncmp;
  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  assert(getElemType(A).getTypePtr() == getElemType(B).getTypePtr());
  PrimType ElemT = *S.getContext().classify(getElemType(A));

  auto returnResult = [&](int V) -> bool {
    pushInteger(S, V, Call->getType());
    return true;
  };

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {
    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }

    if (IsWide) {
      INT_TYPE_SWITCH(ElemT, {
        T CA = PA.deref<T>();
        T CB = PB.deref<T>();
        if (CA > CB)
          return returnResult(1);
        else if (CA < CB)
          return returnResult(-1);
        else if (CA.isZero() || CB.isZero())
          return returnResult(0);
      });
      continue;
    }

    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB)
      return returnResult(1);
    else if (CA < CB)
      return returnResult(-1);
    if (CA == 0 || CB == 0)
      return returnResult(0);
  }

  return returnResult(0);
}
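
// Constant-expression behavior (illustrative):
//   static_assert(__builtin_strcmp("abc", "abd") < 0);
//   static_assert(__builtin_strncmp("abc", "abd", 2) == 0); // Limit stops early.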

static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  const Pointer &StrPtr = S.Stk.pop<Pointer>();

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}
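
// For example, in a constant expression:
//   static_assert(__builtin_strlen("hello") == 5);
//   static_assert(__builtin_wcslen(L"hi") == 2); // ElemSize == sizeof(wchar_t).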

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                bool Signaling) {
  const Pointer &Arg = S.Stk.pop<Pointer>();

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          Call->getDirectCallee()->getReturnType());

  Floating Result;
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}
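
// Illustrative results on an IEEE 754-2008 target:
//   __builtin_nan("")   yields a quiet NaN with a zero payload (an empty
//   string is treated as zero); __builtin_nans("1") yields a signaling NaN
//   with payload 1, parsed by the getAsInteger() call above.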

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          Call->getDirectCallee()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}

static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

  if (IsNumBuiltin)
    S.Stk.push<Floating>(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    S.Stk.push<Floating>(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

  if (IsNumBuiltin)
    S.Stk.push<Floating>(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    S.Stk.push<Floating>(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
  return true;
}

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, bool CheckSign,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, IsInf, Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call, unsigned ID) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult cmp = LHS.compare(RHS);
          return cmp == ComparisonCategoryResult::Less ||
                 cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = popToAPSInt(S.Stk, FPClassArgT);
  const Floating &F = S.Stk.pop<Floating>();

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

/// Five int values followed by one floating value.
/// __builtin_fpclassify(int, int, int, int, int, float)
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.pop<Floating>();

  PrimType IntT = *S.getContext().classify(Call->getArg(0));
  // The last argument is first on the stack, so fill Values back to front.
  APSInt Values[5];
  for (unsigned I = 0; I != 5; ++I)
    Values[4 - I] = popToAPSInt(S.Stk, IntT);

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  assert(Index <= 4);

  pushInteger(S, Values[Index], Call->getType());
  return true;
}
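
// Canonical usage (illustrative):
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, x)
// returns whichever of the five values matches the classification of x.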

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame) {
  const Floating &Val = S.Stk.pop<Floating>();

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  if (Val == APSInt(APInt::getSignedMinValue(Val.getBitWidth()),
                    /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  if (NumArgs == 3)
    S.Stk.discard<Floating>();
  discard(S.Stk, ArgT);

  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = popToAPSInt(S.Stk, AmountT);
  APSInt Value = popToAPSInt(S.Stk, ValueT);

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}
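
// E.g. __builtin_rotateleft8(0x01, 1) == 0x02 and
// __builtin_rotateright8(0x01, 1) == 0x80; the amount is reduced modulo the
// bit width by the urem() calls above.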

static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = popToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}
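
// E.g. __builtin_ffs(0) == 0 (no bit set) and __builtin_ffs(8) == 4, since
// ffs numbers the least significant set bit starting at 1.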

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
#ifndef NDEBUG
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
  assert(PtrT == PT_Ptr &&
         "Unsupported pointer type passed to __builtin_addressof()");
#endif
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const CallExpr *Call) {
  return Call->getDirectCallee()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = popToAPSInt(S.Stk, ArgT);

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut).
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call,
                                       unsigned BuiltinOp) {
  const Pointer &ResultPtr = S.Stk.pop<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    // Find the largest int among the operands and the result type.
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}
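
// Example (illustrative): with `int r;`, __builtin_add_overflow(INT_MAX, 1, &r)
// evaluates to true and stores the wrapped two's-complement sum in r.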

/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned BuiltinOp) {
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt CarryIn = popToAPSInt(S.Stk, LHST);
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);

  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}
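
// Example (illustrative): __builtin_addc(UINT_MAX, 1u, 0u, &carry) yields 0
// with carry == 1, because the first addition overflows.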

static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                unsigned BuiltinOp) {
  std::optional<APSInt> Fallback;
  if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) {
    PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
    Fallback = popToAPSInt(S.Stk, FallbackT);
  }
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = popToAPSInt(S.Stk, ValT);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}
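
// E.g. __builtin_clz(1) == 31 for a 32-bit int, while __builtin_clzg(0u, 32)
// takes the fallback path above and yields 32 instead of being undefined.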

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                unsigned BuiltinID) {
  std::optional<APSInt> Fallback;
  if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) {
    PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
    Fallback = popToAPSInt(S.Stk, FallbackT);
  }
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = popToAPSInt(S.Stk, ValT);

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}
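
// E.g. __builtin_ctz(8) == 3; __builtin_ctzg(0u, 32) returns the fallback 32.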

static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = popToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}
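
// E.g. __builtin_bswap32(0x12345678) == 0x78563412.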

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call,
                                             unsigned BuiltinOp) {
  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call) {
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits))
      return returnBool(true);
  }

  return false; // returnBool(false);
}

/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();
  Pointer &Result = S.Stk.peek<Pointer>();

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}
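
// E.g. __builtin_complex(1.0, 2.0) yields the _Complex double 1.0 + 2.0i;
// the real and imaginary parts are written at indices 0 and 1 above.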

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const CallExpr *Call,
                                               unsigned BuiltinOp) {
  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth =
      S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  unsigned PtrOffset = Ptr.getByteOffset();
  PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
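
// Integral examples: __builtin_align_up(13, 8) == 16,
// __builtin_align_down(13, 8) == 8, and __builtin_is_aligned(16, 8) is true.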

/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  std::optional<APSInt> ExtraOffset;
  if (Call->getNumArgs() == 3)
    ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));

  APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}

static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
  APSInt Index = popToAPSInt(S.Stk, IndexT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
  uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
  Length = Length > BitWidth ? BitWidth : Length;

  // Handle out of bounds cases.
  if (Length == 0 || Shift >= BitWidth) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  uint64_t Result = Val.getZExtValue() >> Shift;
  Result &= llvm::maskTrailingOnes<uint64_t>(Length);
  pushInteger(S, Result, Call->getType());
  return true;
}
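
// Example (illustrative): with a control word of (4 << 8) | 4 (length 4,
// start 4), extracting from 0xF0 yields 0xF.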

static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !CallType->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));

  APSInt Idx = popToAPSInt(S.Stk, IndexT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);

  if (Index < BitWidth)
    Val.clearHighBits(BitWidth - Index);

  pushInteger(S, Val, CallType);
  return true;
}
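
// Example (illustrative, via the _bzhi_u32 intrinsic): zeroing all bits at
// position 8 and above turns 0xFFFFFFFF into 0xFF.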

static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countLeadingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countTrailingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Mask = popToAPSInt(S.Stk, MaskT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(I, Val[P++]);
  }
  pushInteger(S, std::move(Result), Call->getType());
  return true;
}
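
// E.g. (illustrative): pdep with value 0b101 and mask 0b11100 gives 0b10100;
// the low bits of the value are deposited at the mask's set positions.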

static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Mask = popToAPSInt(S.Stk, MaskT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(P++, Val[I]);
  }
  pushInteger(S, std::move(Result), Call->getType());
  return true;
}
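
// E.g. (illustrative): pext with value 0b10100 and mask 0b11100 gives 0b101;
// bits at the mask's set positions are packed into the low bits, the inverse
// of pdep above.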

/// (CarryIn, LHS, RHS, Result)
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call,
                                                    unsigned BuiltinOp) {
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();

  PrimType CarryInT = *S.getContext().classify(Call->getArg(0));
  PrimType LHST = *S.getContext().classify(Call->getArg(1));
  PrimType RHST = *S.getContext().classify(Call->getArg(2));
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);
  APSInt CarryIn = popToAPSInt(S.Stk, CarryInT);

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}

static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}

static bool
interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const auto &Ptr = S.Stk.pop<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  // This should be created for a StringLiteral, so it should always hold at
  // least one array element.
  assert(Ptr.getFieldDesc()->getNumElems() >= 1);
  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(R);
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
                                         const InterpFrame *Frame,
                                         const CallExpr *Call) {
  // A call to __builtin_operator_new is only valid within
  // std::allocator<T>::allocate. Walk up the call stack to find the
  // appropriate caller and get the element type from it.
  auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
  APSInt Bytes = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }
  assert(NewCall);

  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: The same check we're using in CheckArraySize()
  if (NumElems.getActiveBits() >
          ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
    return false;

  bool IsArray = NumElems.ugt(1);
  std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  if (ElemT) {
    if (IsArray) {
      Block *B = Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
                                    S.Ctx.getEvalID(),
                                    DynamicAllocator::Form::Operator);
      assert(B);
      S.Stk.push<Pointer>(Pointer(B).atIndex(0));
      return true;
    }

    const Descriptor *Desc = S.P.createDescriptor(
        NewCall, *ElemT, ElemType.getTypePtr(), Descriptor::InlineDescMD,
        /*IsConst=*/false, /*IsTemporary=*/false,
        /*IsMutable=*/false);
    Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                  DynamicAllocator::Form::Operator);
    assert(B);

    S.Stk.push<Pointer>(B);
    return true;
  }

  assert(!ElemT);
  // Structs etc.
  const Descriptor *Desc =
      S.P.createDescriptor(NewCall, ElemType.getTypePtr(),
                           IsArray ? std::nullopt : Descriptor::InlineDescMD);

  if (IsArray) {
    Block *B =
        Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
                           DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
    return true;
  }

  Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                DynamicAllocator::Form::Operator);
  assert(B);
  S.Stk.push<Pointer>(B);
  return true;
}
1516
1517static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1518 const InterpFrame *Frame,
1519 const CallExpr *Call) {
1520 const Expr *Source = nullptr;
1521 const Block *BlockToDelete = nullptr;
1522
1523 if (S.checkingPotentialConstantExpression()) {
1524 S.Stk.discard<Pointer>();
1525 return false;
1526 }
1527
1528 // This is permitted only within a call to std::allocator<T>::deallocate.
1529 if (!S.getStdAllocatorCaller(Name: "deallocate")) {
1530 S.FFDiag(Call);
1531 S.Stk.discard<Pointer>();
1532 return true;
1533 }
1534
1535 {
1536 const Pointer &Ptr = S.Stk.pop<Pointer>();
1537
1538 if (Ptr.isZero()) {
1539 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1540 return true;
1541 }
1542
1543 Source = Ptr.getDeclDesc()->asExpr();
1544 BlockToDelete = Ptr.block();
1545
1546 if (!BlockToDelete->isDynamic()) {
1547 S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
1548 << Ptr.toDiagnosticString(S.getASTContext());
1549 if (const auto *D = Ptr.getFieldDesc()->asDecl())
1550 S.Note(D->getLocation(), diag::note_declared_at);
1551 }
1552 }
1553 assert(BlockToDelete);
1554
1555 DynamicAllocator &Allocator = S.getAllocator();
1556 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1557 std::optional<DynamicAllocator::Form> AllocForm =
1558 Allocator.getAllocationForm(Source);
1559
1560 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1561 // Nothing has been deallocated, this must be a double-delete.
1562 const SourceInfo &Loc = S.Current->getSource(PC: OpPC);
1563 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1564 return false;
1565 }
1566 assert(AllocForm);
1567
1568 return CheckNewDeleteForms(
1569 S, OpPC, AllocForm: *AllocForm, DeleteForm: DynamicAllocator::Form::Operator, D: BlockDesc, NewExpr: Source);
1570}
1571
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}

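/// Implements the integer __builtin_reduce_* builtins (add, mul, and, or,
/// xor) by folding the vector elements left to right. Overflow in add/mul
/// is diagnosed via handleOverflow, mirroring scalar arithmetic.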
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call, unsigned ID) {
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
    T Result = Arg.atIndex(0).deref<T>();
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.atIndex(I).deref<T>();
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}

/// Can be called with an integer or vector as the first and only parameter.
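/// In the vector case, the results are written element by element into the
/// caller-provided return slot, which is the Pointer at the top of the stack
/// (peeked, not popped).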
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = popToAPSInt(S.Stk, ArgT);
    pushInteger(S, Val.popcount(), Call->getType());
    return true;
  }
  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      Dst.atIndex(I).deref<T>() =
          T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}

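/// Shared implementation of the memcpy/memmove family, including the wide
/// and __builtin_ variants. Sizes are byte counts, except for the wmem*
/// builtins, whose count is given in wchar_t units and converted below.
/// Illustrative example: in a constant expression,
///   __builtin_memcpy(Dst, Src, 3 * sizeof(int))
/// copies three int elements, provided both arrays have room for them.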
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  const ASTContext &ASTCtx = S.getASTContext();
  PrimType SizeT = *S.getContext().classify(Call->getArg(2));
  APSInt Size = popToAPSInt(S.Stk, SizeT);
  const Pointer SrcPtr = S.Stk.pop<Pointer>();
  const Pointer DestPtr = S.Stk.pop<Pointer>();

  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");

  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  bool Move =
      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
               ID == Builtin::BI__builtin_wmemcpy ||
               ID == Builtin::BI__builtin_wmemmove;

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size.isZero()) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(ASTCtx);
    return false;
  }

  // Diagnose integral src/dest pointers specially.
  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
    std::string DiagVal = "(void *)";
    DiagVal += SrcPtr.isIntegralPointer()
                   ? std::to_string(SrcPtr.getIntegerRepresentation())
                   : std::to_string(DestPtr.getIntegerRepresentation());
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
    return false;
  }

  // Can't read from dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  QualType DestElemType = getElemType(DestPtr);
  size_t RemainingDestElems;
  if (DestPtr.getFieldDesc()->isArray()) {
    RemainingDestElems = DestPtr.isUnknownSizeArray()
                             ? 0
                             : (DestPtr.getNumElems() - DestPtr.getIndex());
  } else {
    RemainingDestElems = 1;
  }
  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();

  if (WChar) {
    uint64_t WCharSize =
        ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
    Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
                   /*IsUnsigned=*/true);
  }

  if (Size.urem(DestElemSize) != 0) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
    return false;
  }

  QualType SrcElemType = getElemType(SrcPtr);
  size_t RemainingSrcElems;
  if (SrcPtr.getFieldDesc()->isArray()) {
    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
                            ? 0
                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
  } else {
    RemainingSrcElems = 1;
  }
  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();

  if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
        << Move << SrcElemType << DestElemType;
    return false;
  }

  if (DestElemType->isIncompleteType() ||
      DestPtr.getType()->isIncompleteType()) {
    QualType DiagType =
        DestElemType->isIncompleteType() ? DestElemType : DestPtr.getType();
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DiagType;
    return false;
  }

  if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
        << Move << DestElemType;
    return false;
  }

  // Check if we have enough elements to read from and write to.
  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
  if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
    APInt N = Size.udiv(DestElemSize);
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
        << DestElemType << toString(N, 10, /*Signed=*/false);
    return false;
  }

  // Check for overlapping memory regions.
  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
    // Remove base casts.
    Pointer SrcP = SrcPtr;
    while (SrcP.isBaseClass())
      SrcP = SrcP.getBase();

    Pointer DestP = DestPtr;
    while (DestP.isBaseClass())
      DestP = DestP.getBase();

    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
    unsigned N = Size.getZExtValue();

    if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  assert(Size.getZExtValue() % DestElemSize == 0);
  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
    return false;

  S.Stk.push<Pointer>(DestPtr);
  return true;
}

/// Determine if T is a character type for which we guarantee that
/// sizeof(T) == 1.
static bool isOneByteCharacterType(QualType T) {
  return T->isCharType() || T->isChar8Type();
}

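/// Shared implementation of memcmp, bcmp and wmemcmp (plus their __builtin_
/// forms). Both operands are serialized into byte buffers and compared
/// lexicographically; the pushed result is -1, 0, or 1, matching the sign
/// conventions of the library functions.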
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  PrimType SizeT = *S.getContext().classify(Call->getArg(2));
  const APSInt &Size = popToAPSInt(S.Stk, SizeT);
  const Pointer &PtrB = S.Stk.pop<Pointer>();
  const Pointer &PtrA = S.Stk.pop<Pointer>();

  if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
      ID == Builtin::BIwmemcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Size.isZero()) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  bool IsWide =
      (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);

  const ASTContext &ASTCtx = S.getASTContext();
  QualType ElemTypeA = getElemType(PtrA);
  QualType ElemTypeB = getElemType(PtrB);
  // FIXME: This is an arbitrary limitation the current constant interpreter
  // had. We could remove this.
  if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
                  !isOneByteCharacterType(ElemTypeB))) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcmp_unsupported)
        << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
        << PtrB.getType();
    return false;
  }

  if (PtrA.isDummy() || PtrB.isDummy())
    return false;

  // Now, read both pointers to a buffer and compare those.
  BitcastBuffer BufferA(
      Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());

  BitcastBuffer BufferB(
      Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());

  size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
                                  BufferB.byteSize().getQuantity());

  unsigned ElemSize = 1;
  if (IsWide)
    ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
  // The Size given for the wide variants is in wide-char units. Convert it
  // to bytes.
  size_t ByteSize = Size.getZExtValue() * ElemSize;
  size_t CmpSize = std::min(MinBufferSize, ByteSize);

  for (size_t I = 0; I != CmpSize; I += ElemSize) {
    if (IsWide) {
      INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
        T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
        T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
        if (A < B) {
          pushInteger(S, -1, Call->getType());
          return true;
        } else if (A > B) {
          pushInteger(S, 1, Call->getType());
          return true;
        }
      });
    } else {
      std::byte A = BufferA.Data[I];
      std::byte B = BufferB.Data[I];

      if (A < B) {
        pushInteger(S, -1, Call->getType());
        return true;
      } else if (A > B) {
        pushInteger(S, 1, Call->getType());
        return true;
      }
    }
  }

  // We compared CmpSize bytes above. If the limiting factor was the Size
  // passed, we're done and the result is equality (0).
  if (ByteSize <= CmpSize) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  // However, if we read all the available bytes but were instructed to read
  // even more, diagnose this as a "read of dereferenced one-past-the-end
  // pointer". This is what would happen if we called CheckLoad() on every array
  // element.
  S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
      << AK_Read << S.Current->getRange(OpPC);
  return false;
}

// __builtin_memchr(ptr, int, int)
// __builtin_strchr(ptr, int)
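// __builtin_wmemchr(ptr, wchar_t, int)
// __builtin_wcschr(ptr, wchar_t)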
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
                                   const CallExpr *Call, unsigned ID) {
  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  std::optional<APSInt> MaxLength;
  PrimType DesiredT = *S.getContext().classify(Call->getArg(1));
  if (Call->getNumArgs() == 3) {
    PrimType MaxT = *S.getContext().classify(Call->getArg(2));
    MaxLength = popToAPSInt(S.Stk, MaxT);
  }
  APSInt Desired = popToAPSInt(S.Stk, DesiredT);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  if (MaxLength && MaxLength->isZero()) {
    S.Stk.push<Pointer>();
    return true;
  }

  if (Ptr.isDummy())
    return false;

  // Null is only okay if the given size is 0.
  if (Ptr.isZero()) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
        << AK_Read;
    return false;
  }

  QualType ElemTy = Ptr.getFieldDesc()->isArray()
                        ? Ptr.getFieldDesc()->getElemQualType()
                        : Ptr.getFieldDesc()->getType();
  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;

  // Give up on byte-oriented matching against multibyte elements.
  if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memchr_unsupported)
        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
    return false;
  }

  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
    // strchr compares directly to the passed integer, and therefore
    // always fails if given an int that is not a char.
    if (Desired !=
        Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
      S.Stk.push<Pointer>();
      return true;
    }
  }

  uint64_t DesiredVal;
  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
    DesiredVal = Desired.getZExtValue();
  } else {
    DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
  }

  bool StopAtZero =
      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);

  PrimType ElemT =
      IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));

  size_t Index = Ptr.getIndex();
  size_t Step = 0;
  for (;;) {
    const Pointer &ElemPtr =
        (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;

    if (!CheckLoad(S, OpPC, ElemPtr))
      return false;

    uint64_t V;
    INT_TYPE_SWITCH_NO_BOOL(
        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });

    if (V == DesiredVal) {
      S.Stk.push<Pointer>(ElemPtr);
      return true;
    }

    if (StopAtZero && V == 0)
      break;

    ++Step;
    if (MaxLength && Step == MaxLength->getZExtValue())
      break;
  }

  S.Stk.push<Pointer>();
  return true;
}

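/// Computes the size in bytes of the full object described by \p Desc,
/// i.e. the whole primitive, array, or record, independent of whatever
/// subobject a pointer might currently designate.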
static unsigned computeFullDescSize(const ASTContext &ASTCtx,
                                    const Descriptor *Desc) {

  if (Desc->isPrimitive())
    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();

  if (Desc->isArray())
    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
           Desc->getNumElems();

  if (Desc->isRecord())
    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();

  llvm_unreachable("Unhandled descriptor type");
  return 0;
}

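/// Computes the byte offset of \p Ptr from the start of its complete object
/// by walking up through array elements, base classes, and fields, adding
/// the layout offset contributed at each level. For instance, a pointer to
/// Obj.Field.Arr[2] accumulates the record-layout offset of Field plus two
/// element sizes of Arr.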
static unsigned computePointerOffset(const ASTContext &ASTCtx,
                                     const Pointer &Ptr) {
  unsigned Result = 0;

  Pointer P = Ptr;
  while (P.isArrayElement() || P.isField()) {
    P = P.expand();
    const Descriptor *D = P.getFieldDesc();

    if (P.isArrayElement()) {
      unsigned ElemSize =
          ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();
      if (P.isOnePastEnd())
        Result += ElemSize * P.getNumElems();
      else
        Result += ElemSize * P.getIndex();
      P = P.expand().getArray();
    } else if (P.isBaseClass()) {

      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
      bool IsVirtual = Ptr.isVirtualBaseClass();
      P = P.getBase();
      const Record *BaseRecord = P.getRecord();

      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
      if (IsVirtual)
        Result += Layout.getVBaseClassOffset(RD).getQuantity();
      else
        Result += Layout.getBaseClassOffset(RD).getQuantity();
    } else if (P.isField()) {
      const FieldDecl *FD = P.getField();
      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(FD->getParent());
      unsigned FieldIndex = FD->getFieldIndex();
      uint64_t FieldOffset =
          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
              .getQuantity();
      Result += FieldOffset;
      P = P.getBase();
    } else
      llvm_unreachable("Unhandled descriptor type");
  }

  return Result;
}

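/// Implements __builtin_object_size and __builtin_dynamic_object_size by
/// subtracting the pointer's offset within its complete object from that
/// object's full size. Note that the Kind argument (0-3) is currently only
/// asserted on; the closest-surrounding-subobject modes are not
/// distinguished here.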
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  PrimType KindT = *S.getContext().classify(Call->getArg(1));
  [[maybe_unused]] unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();

  assert(Kind <= 3 && "unexpected kind");

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  if (Ptr.isZero())
    return false;

  const Descriptor *DeclDesc = Ptr.getDeclDesc();
  if (!DeclDesc)
    return false;

  const ASTContext &ASTCtx = S.getASTContext();

  unsigned ByteOffset = computePointerOffset(ASTCtx, Ptr);
  unsigned FullSize = computeFullDescSize(ASTCtx, DeclDesc);

  pushInteger(S, FullSize - ByteOffset, Call->getType());

  return true;
}

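/// Implements __builtin_is_within_lifetime (underlying C++26
/// std::is_within_lifetime). Null and one-past-the-end pointers are invalid
/// arguments; otherwise the result reflects whether the pointee is the
/// active, live object.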
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
                                               const CallExpr *Call) {

  if (!S.inConstantContext())
    return false;

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = S.Current->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
    }
    S.CCEDiag(CalledFromStd
                  ? S.Current->Caller->getSource(S.Current->getRetPC())
                  : S.Current->getSource(OpPC),
              diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return false;
  };

  if (Ptr.isZero())
    return Error(0);
  if (Ptr.isOnePastEnd())
    return Error(1);

  bool Result = true;
  if (!Ptr.isActive()) {
    Result = false;
  } else {
    if (!CheckLive(S, OpPC, Ptr, AK_Read))
      return false;
    if (!CheckMutable(S, OpPC, Ptr))
      return false;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}

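/// Central dispatch for all builtins the bytecode interpreter can
/// constant-evaluate. Returns false (after emitting a diagnostic) for
/// builtins that are not supported in a constant expression.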
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
                      uint32_t BuiltinID) {
  if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
    return Invalid(S, OpPC);

  const InterpFrame *Frame = S.Current;
  switch (BuiltinID) {
  case Builtin::BI__builtin_is_constant_evaluated:
    return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_assume:
  case Builtin::BI__assume:
    return interp__builtin_assume(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_strcmp:
  case Builtin::BIstrcmp:
  case Builtin::BI__builtin_strncmp:
  case Builtin::BIstrncmp:
  case Builtin::BI__builtin_wcsncmp:
  case Builtin::BIwcsncmp:
  case Builtin::BI__builtin_wcscmp:
  case Builtin::BIwcscmp:
    return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_strlen:
  case Builtin::BIstrlen:
  case Builtin::BI__builtin_wcslen:
  case Builtin::BIwcslen:
    return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);

  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128:
    return interp__builtin_inf(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128:
    return interp__builtin_copysign(S, OpPC, Frame);

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128:
    return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);

  case Builtin::BI__builtin_fminimum_num:
  case Builtin::BI__builtin_fminimum_numf:
  case Builtin::BI__builtin_fminimum_numl:
  case Builtin::BI__builtin_fminimum_numf16:
  case Builtin::BI__builtin_fminimum_numf128:
    return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128:
    return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);

  case Builtin::BI__builtin_fmaximum_num:
  case Builtin::BI__builtin_fmaximum_numf:
  case Builtin::BI__builtin_fmaximum_numl:
  case Builtin::BI__builtin_fmaximum_numf16:
  case Builtin::BI__builtin_fmaximum_numf128:
    return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);

  case Builtin::BI__builtin_isnan:
    return interp__builtin_isnan(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_issignaling:
    return interp__builtin_issignaling(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_isinf:
    return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);

  case Builtin::BI__builtin_isinf_sign:
    return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);

  case Builtin::BI__builtin_isfinite:
    return interp__builtin_isfinite(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_isnormal:
    return interp__builtin_isnormal(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_issubnormal:
    return interp__builtin_issubnormal(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_iszero:
    return interp__builtin_iszero(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    return interp__builtin_signbit(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    return interp_floating_comparison(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_isfpclass:
    return interp__builtin_isfpclass(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_fpclassify:
    return interp__builtin_fpclassify(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    return interp__builtin_fabs(S, OpPC, Frame);

  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
    return interp__builtin_abs(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
    return interp__builtin_popcount(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll:
    return interp__builtin_parity(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll:
    return interp__builtin_clrsb(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64:
    return interp__builtin_bitreverse(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_classify_type:
    return interp__builtin_classify_type(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_expect:
  case Builtin::BI__builtin_expect_with_probability:
    return interp__builtin_expect(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false);

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true);

  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll:
    return interp__builtin_ffs(S, OpPC, Frame, Call);

  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    assert(isNoopBuiltin(BuiltinID));
    return interp__builtin_addressof(S, OpPC, Frame, Call);

  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    assert(isNoopBuiltin(BuiltinID));
    return interp__builtin_move(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_eh_return_data_regno:
    return interp__builtin_eh_return_data_regno(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_launder:
    assert(isNoopBuiltin(BuiltinID));
    return true;

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll:
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clzg:
  case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64:
    return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll:
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctzg:
    return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
    return interp__builtin_bswap(S, OpPC, Frame, Call);

  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
    return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__c11_atomic_is_lock_free:
    return interp__builtin_c11_atomic_is_lock_free(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_complex:
    return interp__builtin_complex(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_assume_aligned:
    return interp__builtin_assume_aligned(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_bextr_u32:
  case clang::X86::BI__builtin_ia32_bextr_u64:
  case clang::X86::BI__builtin_ia32_bextri_u32:
  case clang::X86::BI__builtin_ia32_bextri_u64:
    return interp__builtin_ia32_bextr(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_bzhi_si:
  case clang::X86::BI__builtin_ia32_bzhi_di:
    return interp__builtin_ia32_bzhi(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_lzcnt_u16:
  case clang::X86::BI__builtin_ia32_lzcnt_u32:
  case clang::X86::BI__builtin_ia32_lzcnt_u64:
    return interp__builtin_ia32_lzcnt(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_tzcnt_u16:
  case clang::X86::BI__builtin_ia32_tzcnt_u32:
  case clang::X86::BI__builtin_ia32_tzcnt_u64:
    return interp__builtin_ia32_tzcnt(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_pdep_si:
  case clang::X86::BI__builtin_ia32_pdep_di:
    return interp__builtin_ia32_pdep(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_pext_si:
  case clang::X86::BI__builtin_ia32_pext_di:
    return interp__builtin_ia32_pext(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_addcarryx_u32:
  case clang::X86::BI__builtin_ia32_addcarryx_u64:
  case clang::X86::BI__builtin_ia32_subborrow_u32:
  case clang::X86::BI__builtin_ia32_subborrow_u64:
    return interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, Call,
                                                   BuiltinID);

  case Builtin::BI__builtin_os_log_format_buffer_size:
    return interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_ptrauth_string_discriminator:
    return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);

  case Builtin::BI__noop:
    pushInteger(S, 0, Call->getType());
    return true;

  case Builtin::BI__builtin_operator_new:
    return interp__builtin_operator_new(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_operator_delete:
    return interp__builtin_operator_delete(S, OpPC, Frame, Call);

  case Builtin::BI__arithmetic_fence:
    return interp__builtin_arithmetic_fence(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_and:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_xor:
    return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_elementwise_popcount:
    return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_wmemcpy:
  case Builtin::BIwmemcpy:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_wmemmove:
  case Builtin::BIwmemmove:
    return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_memcmp:
  case Builtin::BImemcmp:
  case Builtin::BI__builtin_bcmp:
  case Builtin::BIbcmp:
  case Builtin::BI__builtin_wmemcmp:
  case Builtin::BIwmemcmp:
    return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BImemchr:
  case Builtin::BI__builtin_memchr:
  case Builtin::BIstrchr:
  case Builtin::BI__builtin_strchr:
  case Builtin::BIwmemchr:
  case Builtin::BI__builtin_wmemchr:
  case Builtin::BIwcschr:
  case Builtin::BI__builtin_wcschr:
  case Builtin::BI__builtin_char_memchr:
    return interp__builtin_memchr(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_object_size:
  case Builtin::BI__builtin_dynamic_object_size:
    return interp__builtin_object_size(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_is_within_lifetime:
    return interp__builtin_is_within_lifetime(S, OpPC, Call);

  default:
    S.FFDiag(S.Current->getLocation(OpPC),
             diag::note_invalid_subexpr_in_const_expr)
        << S.Current->getRange(OpPC);
    return false;
  }

  llvm_unreachable("Unhandled builtin ID");
}

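/// Evaluates an OffsetOfExpr whose array-index operands have already been
/// evaluated and are passed in \p ArrayIndices, e.g.
///   offsetof(S, Member.Array[Idx].Field)
/// Fields, array subscripts, and non-virtual base classes each add their
/// layout offset; virtual bases are rejected.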
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       llvm::ArrayRef<int64_t> ArrayIndices,
                       int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result +=
          S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}

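/// Stores \p IntValue into the single integer field of the given comparison
/// result record (e.g. a std::strong_ordering value), which is expected to
/// consist of exactly one field.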
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
                                const Pointer &Ptr, const APSInt &IntValue) {

  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}

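/// Recursively resets \p Dest to a value-initialized state by re-running the
/// default constructor of each primitive value and recursing into records
/// and arrays. Used below to clear the inactive members of a union after a
/// copy.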
static void zeroAll(Pointer &Dest) {
  const Descriptor *Desc = Dest.getFieldDesc();

  if (Desc->isPrimitive()) {
    TYPE_SWITCH(Desc->getPrimType(), {
      Dest.deref<T>().~T();
      new (&Dest.deref<T>()) T();
    });
    return;
  }

  if (Desc->isRecord()) {
    const Record *R = Desc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer FieldPtr = Dest.atField(F.Offset);
      zeroAll(FieldPtr);
    }
    return;
  }

  if (Desc->isPrimitiveArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      TYPE_SWITCH(Desc->getPrimType(), {
        Dest.deref<T>().~T();
        new (&Dest.deref<T>()) T();
      });
    }
    return;
  }

  if (Desc->isCompositeArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I).narrow();
      zeroAll(ElemPtr);
    }
    return;
  }
}

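/// copyRecord and copyComposite together implement a memberwise copy of
/// interpreter values. \p Activate is set when copying into a union member
/// so that the copied destination fields are marked active.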
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate);
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field. Zero all others.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        if (!copyField(F, /*Activate=*/true))
          return false;
      } else {
        Pointer DestField = Dest.atField(F.Offset);
        zeroAll(DestField);
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}

static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate = false) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.atIndex(I).deref<T>();
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isCompositeArray()) {
    assert(SrcDesc->isCompositeArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      const Pointer &SrcElem = Src.atIndex(I).narrow();
      Pointer DestElem = Dest.atIndex(I).narrow();
      if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
        return false;
    }
    return true;
  }

  if (DestDesc->isRecord())
    return copyRecord(S, OpPC, Src, Dest, Activate);
  return Invalid(S, OpPC);
}

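/// Whole-object copy of the composite value \p Src into \p Dest. Note this
/// is a different overload from the byte-counted DoMemcpy invoked by
/// interp__builtin_memcpy above, which is defined separately.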
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}

} // namespace interp
} // namespace clang