1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8#include "../ExprConstShared.h"
9#include "Boolean.h"
10#include "Compiler.h"
11#include "EvalEmitter.h"
12#include "Interp.h"
13#include "InterpBuiltinBitCast.h"
14#include "PrimType.h"
15#include "clang/AST/OSLog.h"
16#include "clang/AST/RecordLayout.h"
17#include "clang/Basic/Builtins.h"
18#include "clang/Basic/TargetBuiltins.h"
19#include "clang/Basic/TargetInfo.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/Support/ErrorHandling.h"
22#include "llvm/Support/SipHash.h"
23
24namespace clang {
25namespace interp {
26
27LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
28 switch (ID) {
29 case Builtin::BIas_const:
30 case Builtin::BIforward:
31 case Builtin::BIforward_like:
32 case Builtin::BImove:
33 case Builtin::BImove_if_noexcept:
34 case Builtin::BIaddressof:
35 case Builtin::BI__addressof:
36 case Builtin::BI__builtin_addressof:
37 case Builtin::BI__builtin_launder:
38 return true;
39 default:
40 return false;
41 }
42 return false;
43}
44
/// Pops and discards the top-of-stack value of primitive type \p T.
static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}
48
/// Pops the top-of-stack integral value of primitive type \p T and returns
/// it converted to an APSInt.
static APSInt popToAPSInt(InterpStack &Stk, PrimType T) {
  INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
}
52
/// Pushes \p Val on the stack as the type given by \p QT.
/// \p QT must classify to an integral primitive type.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(T: QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(T: QT);

  // Arbitrary-precision values need interpreter-allocated storage: allocate
  // first, then copy the value into it before pushing.
  if (T == PT_IntAPS) {
    auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
    Result.copy(V: Val);
    S.Stk.push<IntegralAP<true>>(Args&: Result);
    return;
  }

  if (T == PT_IntAP) {
    auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
    Result.copy(V: Val);
    S.Stk.push<IntegralAP<false>>(Args&: Result);
    return;
  }

  // Fixed-width case: round-trip through a 64-bit value of the matching
  // signedness and let T::from() truncate to the target width.
  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}
85
/// Convenience overload: accepts a plain integer, an APInt, or an APSInt
/// and forwards to the APSInt overload after normalizing width/signedness.
template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    // APInt carries no sign; treat it as unsigned.
    pushInteger(S, Val: APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    // Built-in integer: widen to sizeof(T)*8 bits, keeping T's signedness.
    pushInteger(S,
                Val: APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                                std::is_signed_v<T>),
                          !std::is_signed_v<T>),
                QT);
}
99
/// Stores \p Value into the object designated by \p Dest, whose primitive
/// type is \p ValueT. AP-int destinations get fresh interpreter-allocated
/// storage of the value's bit width.
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
                          const APSInt &Value) {

  if (ValueT == PT_IntAPS) {
    Dest.deref<IntegralAP<true>>() =
        S.allocAP<IntegralAP<true>>(BitWidth: Value.getBitWidth());
    Dest.deref<IntegralAP<true>>().copy(V: Value);
  } else if (ValueT == PT_IntAP) {
    Dest.deref<IntegralAP<false>>() =
        S.allocAP<IntegralAP<false>>(BitWidth: Value.getBitWidth());
    Dest.deref<IntegralAP<false>>().copy(V: Value);
  } else {
    // Fixed-width case: convert and store directly.
    INT_TYPE_SWITCH_NO_BOOL(
        ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
  }
}
116
117static QualType getElemType(const Pointer &P) {
118 const Descriptor *Desc = P.getFieldDesc();
119 QualType T = Desc->getType();
120 if (Desc->isPrimitive())
121 return T;
122 if (T->isPointerType())
123 return T->getAs<PointerType>()->getPointeeType();
124 if (Desc->isArray())
125 return Desc->getElemQualType();
126 if (const auto *AT = T->getAsArrayTypeUnsafe())
127 return AT->getElementType();
128 return T;
129}
130
/// Emits the note explaining that builtin \p ID is not usable in a constant
/// expression. In C++11 and later the note names the builtin; otherwise a
/// generic "invalid subexpression" note is used.
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  // Skip the work entirely when no diagnostics are being collected.
  if (!S.diagnosing())
    return;

  auto Loc = S.Current->getSource(PC: OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(SI: Loc, DiagId: diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << S.getASTContext().BuiltinInfo.getQuotedName(ID);
  else
    S.CCEDiag(SI: Loc, DiagId: diag::note_invalid_subexpr_in_const_expr);
}
144
/// __builtin_is_constant_evaluated: pushes true iff we are in a constant
/// context. Also warns when the result is tautologically true (e.g. inside
/// an `if constexpr` condition), attributing the warning to
/// std::is_constant_evaluated() when that wrapper is the caller.
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr(Str: "is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
    if (Caller && isStdCall(Frame->getCallee())) {
      // Point the warning at the std::is_constant_evaluated() call site.
      const Expr *E = Caller->getExpr(PC: Caller->getRetPC());
      S.report(Loc: E->getExprLoc(),
               DiagId: diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      S.report(Loc: Call->getExprLoc(),
               DiagId: diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << Call->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Args: Boolean::from(Value: S.inConstantContext()));
  return true;
}
174
175// __builtin_assume(int)
176static bool interp__builtin_assume(InterpState &S, CodePtr OpPC,
177 const InterpFrame *Frame,
178 const CallExpr *Call) {
179 assert(Call->getNumArgs() == 1);
180 discard(Stk&: S.Stk, T: *S.getContext().classify(E: Call->getArg(Arg: 0)));
181 return true;
182}
183
/// Implements strcmp/strncmp/wcscmp/wcsncmp (and their __builtin_ forms):
/// pops the optional length limit, then the two string pointers, and pushes
/// -1/0/1. The non-__builtin library forms additionally emit a
/// "not constexpr" note.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  uint64_t Limit = ~static_cast<uint64_t>(0);
  // The 'n' variants have the limit as their third argument, pushed last.
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
      ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
    Limit = popToAPSInt(Stk&: S.Stk, T: *S.getContext().classify(E: Call->getArg(Arg: 2)))
                .getZExtValue();

  const Pointer &B = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
      ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Comparing zero characters always yields equality.
  if (Limit == 0) {
    pushInteger(S, Val: 0, QT: Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, Ptr: A, AK: AK_Read) || !CheckLive(S, OpPC, Ptr: B, AK: AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
                ID == Builtin::BI__builtin_wcscmp ||
                ID == Builtin::BI__builtin_wcsncmp;
  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  assert(getElemType(A).getTypePtr() == getElemType(B).getTypePtr());
  PrimType ElemT = *S.getContext().classify(T: getElemType(P: A));

  auto returnResult = [&](int V) -> bool {
    pushInteger(S, Val: V, QT: Call->getType());
    return true;
  };

  // Walk both strings in lockstep, bounds-checking every element access.
  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {

    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(Idx: IndexA);
    const Pointer &PB = B.atIndex(Idx: IndexB);
    if (!CheckRange(S, OpPC, Ptr: PA, AK: AK_Read) ||
        !CheckRange(S, OpPC, Ptr: PB, AK: AK_Read)) {
      return false;
    }

    if (IsWide) {
      // Wide strings compare elements of the classified wchar_t type.
      INT_TYPE_SWITCH(ElemT, {
        T CA = PA.deref<T>();
        T CB = PB.deref<T>();
        if (CA > CB)
          return returnResult(1);
        else if (CA < CB)
          return returnResult(-1);
        else if (CA.isZero() || CB.isZero())
          return returnResult(0);
      });
      continue;
    }

    // Narrow strings compare as unsigned chars, per the C standard.
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB)
      return returnResult(1);
    else if (CA < CB)
      return returnResult(-1);
    if (CA == 0 || CB == 0)
      return returnResult(0);
  }

  // Limit exhausted with all characters equal.
  return returnResult(0);
}
265
/// Implements strlen/wcslen (and their __builtin_ forms): pops the string
/// pointer and pushes the number of elements before the terminating zero.
/// The library (non-__builtin) forms emit a "not constexpr" note.
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  const Pointer &StrPtr = S.Stk.pop<Pointer>();

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, Ptr: StrPtr))
    return false;

  if (!CheckLive(S, OpPC, Ptr: StrPtr, AK: AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, Ptr: StrPtr, AK: AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  // For the wide variants, the element size must match the target wchar_t.
  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  // Scan forward from the pointer's current index, bounds-checking each
  // element, until a zero element is found.
  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(Idx: I);

    if (!CheckRange(S, OpPC, Ptr: ElemPtr, AK: AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Val: Len, QT: Call->getType());

  return true;
}
320
/// Implements __builtin_nan/__builtin_nans (and friends): pops the tag
/// string pointer, parses it as an integer NaN payload, and pushes a
/// quiet (\p Signaling == false) or signaling NaN of the callee's return
/// type. Fails (non-constant) if the tag is not a valid integer.
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                bool Signaling) {
  const Pointer &Arg = S.Stk.pop<Pointer>();

  if (!CheckLoad(S, OpPC, Ptr: Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(Idx: I);

    if (!CheckLoad(S, OpPC, Ptr: Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(Radix: 0, Result&: Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          T: Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(Sem: TargetSemantics);
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result.copy(
          F: llvm::APFloat::getSNaN(Sem: TargetSemantics, /*Negative=*/false, payload: &Fill));
    else
      Result.copy(
          F: llvm::APFloat::getQNaN(Sem: TargetSemantics, /*Negative=*/false, payload: &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result.copy(
          F: llvm::APFloat::getQNaN(Sem: TargetSemantics, /*Negative=*/false, payload: &Fill));
    else
      Result.copy(
          F: llvm::APFloat::getSNaN(Sem: TargetSemantics, /*Negative=*/false, payload: &Fill));
  }

  S.Stk.push<Floating>(Args&: Result);
  return true;
}
382
383static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
384 const InterpFrame *Frame,
385 const CallExpr *Call) {
386 const llvm::fltSemantics &TargetSemantics =
387 S.getASTContext().getFloatTypeSemantics(
388 T: Call->getDirectCallee()->getReturnType());
389
390 Floating Result = S.allocFloat(Sem: TargetSemantics);
391 Result.copy(F: APFloat::getInf(Sem: TargetSemantics));
392 S.Stk.push<Floating>(Args&: Result);
393 return true;
394}
395
396static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
397 const InterpFrame *Frame) {
398 const Floating &Arg2 = S.Stk.pop<Floating>();
399 const Floating &Arg1 = S.Stk.pop<Floating>();
400 Floating Result = S.allocFloat(Sem: Arg1.getSemantics());
401
402 APFloat Copy = Arg1.getAPFloat();
403 Copy.copySign(RHS: Arg2.getAPFloat());
404 Result.copy(F: Copy);
405 S.Stk.push<Floating>(Args&: Result);
406
407 return true;
408}
409
410static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
411 const InterpFrame *Frame, bool IsNumBuiltin) {
412 const Floating &RHS = S.Stk.pop<Floating>();
413 const Floating &LHS = S.Stk.pop<Floating>();
414 Floating Result = S.allocFloat(Sem: LHS.getSemantics());
415
416 if (IsNumBuiltin)
417 Result.copy(F: llvm::minimumnum(A: LHS.getAPFloat(), B: RHS.getAPFloat()));
418 else
419 Result.copy(F: minnum(A: LHS.getAPFloat(), B: RHS.getAPFloat()));
420 S.Stk.push<Floating>(Args&: Result);
421 return true;
422}
423
424static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
425 const InterpFrame *Frame, bool IsNumBuiltin) {
426 const Floating &RHS = S.Stk.pop<Floating>();
427 const Floating &LHS = S.Stk.pop<Floating>();
428 Floating Result = S.allocFloat(Sem: LHS.getSemantics());
429
430 if (IsNumBuiltin)
431 Result.copy(F: llvm::maximumnum(A: LHS.getAPFloat(), B: RHS.getAPFloat()));
432 else
433 Result.copy(F: maxnum(A: LHS.getAPFloat(), B: RHS.getAPFloat()));
434 S.Stk.push<Floating>(Args&: Result);
435 return true;
436}
437
438/// Defined as __builtin_isnan(...), to accommodate the fact that it can
439/// take a float, double, long double, etc.
440/// But for us, that's all a Floating anyway.
441static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
442 const InterpFrame *Frame,
443 const CallExpr *Call) {
444 const Floating &Arg = S.Stk.pop<Floating>();
445
446 pushInteger(S, Val: Arg.isNan(), QT: Call->getType());
447 return true;
448}
449
450static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
451 const InterpFrame *Frame,
452 const CallExpr *Call) {
453 const Floating &Arg = S.Stk.pop<Floating>();
454
455 pushInteger(S, Val: Arg.isSignaling(), QT: Call->getType());
456 return true;
457}
458
459static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
460 const InterpFrame *Frame, bool CheckSign,
461 const CallExpr *Call) {
462 const Floating &Arg = S.Stk.pop<Floating>();
463 bool IsInf = Arg.isInf();
464
465 if (CheckSign)
466 pushInteger(S, Val: IsInf ? (Arg.isNegative() ? -1 : 1) : 0, QT: Call->getType());
467 else
468 pushInteger(S, Val: Arg.isInf(), QT: Call->getType());
469 return true;
470}
471
472static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
473 const InterpFrame *Frame,
474 const CallExpr *Call) {
475 const Floating &Arg = S.Stk.pop<Floating>();
476
477 pushInteger(S, Val: Arg.isFinite(), QT: Call->getType());
478 return true;
479}
480
481static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
482 const InterpFrame *Frame,
483 const CallExpr *Call) {
484 const Floating &Arg = S.Stk.pop<Floating>();
485
486 pushInteger(S, Val: Arg.isNormal(), QT: Call->getType());
487 return true;
488}
489
490static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
491 const InterpFrame *Frame,
492 const CallExpr *Call) {
493 const Floating &Arg = S.Stk.pop<Floating>();
494
495 pushInteger(S, Val: Arg.isDenormal(), QT: Call->getType());
496 return true;
497}
498
499static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
500 const InterpFrame *Frame,
501 const CallExpr *Call) {
502 const Floating &Arg = S.Stk.pop<Floating>();
503
504 pushInteger(S, Val: Arg.isZero(), QT: Call->getType());
505 return true;
506}
507
508static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
509 const InterpFrame *Frame,
510 const CallExpr *Call) {
511 const Floating &Arg = S.Stk.pop<Floating>();
512
513 pushInteger(S, Val: Arg.isNegative(), QT: Call->getType());
514 return true;
515}
516
/// Implements the quiet floating-point comparison builtins
/// (__builtin_isgreater, __builtin_isless, etc.): pops RHS then LHS and
/// pushes the boolean comparison result as the call's integer type.
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call, unsigned ID) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

  // The immediately-invoked lambda selects the comparison based on the
  // builtin ID; its bool result is pushed as the return value.
  pushInteger(
      S,
      Val: [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          // True iff the operands are ordered and unequal.
          ComparisonCategoryResult cmp = LHS.compare(RHS);
          return cmp == ComparisonCategoryResult::Less ||
                 cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      QT: Call->getType());
  return true;
}
549
/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value (the FP-class test mask).
/// Pushes a nonzero result iff the value's class intersects the mask.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  // The mask was pushed last, so it is popped first.
  PrimType FPClassArgT = *S.getContext().classify(T: Call->getArg(Arg: 1)->getType());
  APSInt FPClassArg = popToAPSInt(Stk&: S.Stk, T: FPClassArgT);
  const Floating &F = S.Stk.pop<Floating>();

  int32_t Result = static_cast<int32_t>(
      (F.classify() & std::move(FPClassArg)).getZExtValue());
  pushInteger(S, Val: Result, QT: Call->getType());

  return true;
}
565
/// Five int values followed by one floating value.
/// __builtin_fpclassify(int, int, int, int, int, float)
/// Returns the argument selected by the classification of the float:
/// arg0 = NaN, arg1 = infinity, arg2 = normal, arg3 = subnormal, arg4 = zero.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.pop<Floating>();

  // The five int args share one prim type; pop them in reverse order since
  // the last argument is topmost on the stack.
  PrimType IntT = *S.getContext().classify(E: Call->getArg(Arg: 0));
  APSInt Values[5];
  for (unsigned I = 0; I != 5; ++I)
    Values[4 - I] = popToAPSInt(Stk&: S.Stk, T: IntT);

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    // APFloat reports subnormals as fcNormal; distinguish them here.
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);

  pushInteger(S, Val: Values[Index], QT: Call->getType());
  return true;
}
600
601// The C standard says "fabs raises no floating-point exceptions,
602// even if x is a signaling NaN. The returned value is independent of
603// the current rounding direction mode." Therefore constant folding can
604// proceed without regard to the floating point settings.
605// Reference, WG14 N2478 F.10.4.3
606static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
607 const InterpFrame *Frame) {
608 const Floating &Val = S.Stk.pop<Floating>();
609 APFloat F = Val.getAPFloat();
610 if (!F.isNegative()) {
611 S.Stk.push<Floating>(Args: Val);
612 return true;
613 }
614
615 Floating Result = S.allocFloat(Sem: Val.getSemantics());
616 F.changeSign();
617 Result.copy(F);
618 S.Stk.push<Floating>(Args&: Result);
619 return true;
620}
621
622static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
623 const InterpFrame *Frame,
624 const CallExpr *Call) {
625 PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
626 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);
627 if (Val ==
628 APSInt(APInt::getSignedMinValue(numBits: Val.getBitWidth()), /*IsUnsigned=*/false))
629 return false;
630 if (Val.isNegative())
631 Val.negate();
632 pushInteger(S, Val, QT: Call->getType());
633 return true;
634}
635
636static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
637 const InterpFrame *Frame,
638 const CallExpr *Call) {
639 PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
640 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);
641 pushInteger(S, Val: Val.popcount(), QT: Call->getType());
642 return true;
643}
644
645static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
646 const InterpFrame *Frame,
647 const CallExpr *Call) {
648 PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
649 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);
650 pushInteger(S, Val: Val.popcount() % 2, QT: Call->getType());
651 return true;
652}
653
/// __builtin_clrsb and friends: pushes the number of leading redundant sign
/// bits, i.e. the bit width minus the number of significant bits.
static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
  APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);
  pushInteger(S, Val: Val.getBitWidth() - Val.getSignificantBits(), QT: Call->getType());
  return true;
}
662
663static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
664 const InterpFrame *Frame,
665 const CallExpr *Call) {
666 PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
667 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);
668 pushInteger(S, Val: Val.reverseBits(), QT: Call->getType());
669 return true;
670}
671
672static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
673 const InterpFrame *Frame,
674 const CallExpr *Call) {
675 // This is an unevaluated call, so there are no arguments on the stack.
676 assert(Call->getNumArgs() == 1);
677 const Expr *Arg = Call->getArg(Arg: 0);
678
679 GCCTypeClass ResultClass =
680 EvaluateBuiltinClassifyType(T: Arg->getType(), LangOpts: S.getLangOpts());
681 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
682 pushInteger(S, Val: ReturnVal, QT: Call->getType());
683 return true;
684}
685
// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
  // Pop from the top down: the probability (if present), then the
  // 'expected' value — both 'long' args share the same prim type.
  if (NumArgs == 3)
    S.Stk.discard<Floating>();
  discard(Stk&: S.Stk, T: ArgT);

  // What remains on top is the first argument; re-push it as the result.
  APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);
  pushInteger(S, Val, QT: Call->getType());
  return true;
}
705
/// rotateleft(value, amount) / rotateright(value, amount)
/// The rotate amount is taken modulo the value's bit width, and the result
/// is always unsigned.
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, bool Right) {
  PrimType AmountT = *S.getContext().classify(T: Call->getArg(Arg: 1)->getType());
  PrimType ValueT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());

  // Amount was pushed last, so it comes off the stack first.
  APSInt Amount = popToAPSInt(Stk&: S.Stk, T: AmountT);
  APSInt Value = popToAPSInt(Stk&: S.Stk, T: ValueT);

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(rotateAmt: Amount.urem(RHS: Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(rotateAmt: Amount.urem(RHS: Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Val: Result, QT: Call->getType());
  return true;
}
727
728static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
729 const InterpFrame *Frame,
730 const CallExpr *Call) {
731 PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
732 APSInt Value = popToAPSInt(Stk&: S.Stk, T: ArgT);
733
734 uint64_t N = Value.countr_zero();
735 pushInteger(S, Val: N == Value.getBitWidth() ? 0 : N + 1, QT: Call->getType());
736 return true;
737}
738
/// __builtin_addressof: the argument's pointer is already on the stack and
/// doubles as the result, so there is nothing to do beyond sanity checks.
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
#ifndef NDEBUG
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
  assert(PtrT == PT_Ptr &&
         "Unsupported pointer type passed to __builtin_addressof()");
#endif
  return true;
}
750
751static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
752 const InterpFrame *Frame,
753 const CallExpr *Call) {
754 return Call->getDirectCallee()->isConstexpr();
755}
756
757static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
758 const InterpFrame *Frame,
759 const CallExpr *Call) {
760 PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
761 APSInt Arg = popToAPSInt(Stk&: S.Stk, T: ArgT);
762
763 int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
764 RegNo: Arg.getZExtValue());
765 pushInteger(S, Val: Result, QT: Call->getType());
766 return true;
767}
768
// Two integral values followed by a pointer (lhs, rhs, resultOut).
// Implements __builtin_{add,sub,mul}_overflow and the fixed-type
// s/u-prefixed variants: performs the arithmetic, stores the (possibly
// truncated) result through the out-pointer, and pushes the overflow flag.
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call,
                                       unsigned BuiltinOp) {
  const Pointer &ResultPtr = S.Stk.pop<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  PrimType RHST = *S.getContext().classify(T: Call->getArg(Arg: 1)->getType());
  PrimType LHST = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
  APSInt RHS = popToAPSInt(Stk&: S.Stk, T: RHST);
  APSInt LHS = popToAPSInt(Stk&: S.Stk, T: LHST);
  QualType ResultType = Call->getArg(Arg: 2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(T: ResultType);
  bool Overflow;

  APSInt Result;
  // The generic (type-agnostic) forms first widen both operands to a common
  // width/signedness so the exact mathematical result can be computed.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(T: ResultType);
    uint64_t MaxBits = std::max(a: std::max(a: LHSSize, b: RHSSize), b: ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(width: MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(width: MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <= Result's width.
    APSInt Temp = Result.extOrTrunc(width: S.getASTContext().getTypeSize(T: ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    // If truncation changed the value, the result doesn't fit: overflow.
    if (!APSInt::isSameValue(I1: Temp, I2: Result))
      Overflow = true;
    Result = std::move(Temp);
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(S, Dest: ResultPtr, ValueT: ResultT, Value: Result);
  if (ResultPtr.canBeInitialized())
    ResultPtr.initialize();

  assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Args&: Overflow);
  return true;
}
872
/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
/// Implements __builtin_addc*/__builtin_subc*: computes lhs op rhs op carry,
/// stores the carry-out flag through the pointer, and pushes the result.
/// All three integer operands share the same type, so LHST is reused for
/// the carry-in.
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned BuiltinOp) {
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
  PrimType LHST = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
  PrimType RHST = *S.getContext().classify(T: Call->getArg(Arg: 1)->getType());
  APSInt CarryIn = popToAPSInt(Stk&: S.Stk, T: LHST);
  APSInt RHS = popToAPSInt(Stk&: S.Stk, T: RHST);
  APSInt LHS = popToAPSInt(Stk&: S.Stk, T: LHST);

  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, Overflow&: FirstOverflowed).uadd_ov(RHS: CarryIn, Overflow&: SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, Overflow&: FirstOverflowed).usub_ov(RHS: CarryIn, Overflow&: SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  QualType CarryOutType = Call->getArg(Arg: 3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(T: CarryOutType);
  assignInteger(S, Dest: CarryOutPtr, ValueT: CarryOutT, Value: CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Val: Result, QT: Call->getType());
  return true;
}
926
927static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
928 const InterpFrame *Frame, const CallExpr *Call,
929 unsigned BuiltinOp) {
930
931 std::optional<APSInt> Fallback;
932 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) {
933 PrimType FallbackT = *S.getContext().classify(E: Call->getArg(Arg: 1));
934 Fallback = popToAPSInt(Stk&: S.Stk, T: FallbackT);
935 }
936 PrimType ValT = *S.getContext().classify(E: Call->getArg(Arg: 0));
937 const APSInt &Val = popToAPSInt(Stk&: S.Stk, T: ValT);
938
939 // When the argument is 0, the result of GCC builtins is undefined, whereas
940 // for Microsoft intrinsics, the result is the bit-width of the argument.
941 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
942 BuiltinOp != Builtin::BI__lzcnt &&
943 BuiltinOp != Builtin::BI__lzcnt64;
944
945 if (Val == 0) {
946 if (Fallback) {
947 pushInteger(S, Val: *Fallback, QT: Call->getType());
948 return true;
949 }
950
951 if (ZeroIsUndefined)
952 return false;
953 }
954
955 pushInteger(S, Val: Val.countl_zero(), QT: Call->getType());
956 return true;
957}
958
959static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
960 const InterpFrame *Frame, const CallExpr *Call,
961 unsigned BuiltinID) {
962 std::optional<APSInt> Fallback;
963 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) {
964 PrimType FallbackT = *S.getContext().classify(E: Call->getArg(Arg: 1));
965 Fallback = popToAPSInt(Stk&: S.Stk, T: FallbackT);
966 }
967 PrimType ValT = *S.getContext().classify(E: Call->getArg(Arg: 0));
968 const APSInt &Val = popToAPSInt(Stk&: S.Stk, T: ValT);
969
970 if (Val == 0) {
971 if (Fallback) {
972 pushInteger(S, Val: *Fallback, QT: Call->getType());
973 return true;
974 }
975 return false;
976 }
977
978 pushInteger(S, Val: Val.countr_zero(), QT: Call->getType());
979 return true;
980}
981
/// Implements __builtin_bswap16/32/64: reverses the byte order of the
/// integer argument and pushes it as the call's result type.
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(T: Call->getType());
  PrimType ValT = *S.getContext().classify(E: Call->getArg(Arg: 0));
  const APSInt &Val = popToAPSInt(Stk&: S.Stk, T: ValT);
  // getZExtValue() below requires the value to fit in 64 bits.
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}
994
/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call,
                                             unsigned BuiltinOp) {
  // Helper: push the boolean answer and report successful evaluation.
  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Args&: Value);
    return true;
  };

  // Pop in reverse push order: the pointer first, then the size.
  PrimType ValT = *S.getContext().classify(E: Call->getArg(Arg: 0));
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  const APSInt &SizeVal = popToAPSInt(Stk&: S.Stk, T: ValT);

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(Quantity: SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(BitSize: InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      if (Ptr.isZero())
        return returnBool(true);

      // A constant integral address is lock-free if suitably aligned.
      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(A: Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(Arg: 1);
      // Otherwise, check if the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(Val: PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(T: PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  // __atomic_always_lock_free must fold to a constant, so anything not
  // proven above is 'false'. For __atomic_is_lock_free the answer may still
  // be 'true' at run time, so fail the constant evaluation instead.
  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}
1068
1069/// bool __c11_atomic_is_lock_free(size_t)
1070static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
1071 CodePtr OpPC,
1072 const InterpFrame *Frame,
1073 const CallExpr *Call) {
1074 PrimType ValT = *S.getContext().classify(E: Call->getArg(Arg: 0));
1075 const APSInt &SizeVal = popToAPSInt(Stk&: S.Stk, T: ValT);
1076
1077 auto returnBool = [&S](bool Value) -> bool {
1078 S.Stk.push<Boolean>(Args&: Value);
1079 return true;
1080 };
1081
1082 CharUnits Size = CharUnits::fromQuantity(Quantity: SizeVal.getZExtValue());
1083 if (Size.isPowerOfTwo()) {
1084 // Check against inlining width.
1085 unsigned InlineWidthBits =
1086 S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
1087 if (Size <= S.getASTContext().toCharUnitsFromBits(BitSize: InlineWidthBits))
1088 return returnBool(true);
1089 }
1090
1091 return false; // returnBool(false);
1092}
1093
1094/// __builtin_complex(Float A, float B);
1095static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
1096 const InterpFrame *Frame,
1097 const CallExpr *Call) {
1098 const Floating &Arg2 = S.Stk.pop<Floating>();
1099 const Floating &Arg1 = S.Stk.pop<Floating>();
1100 Pointer &Result = S.Stk.peek<Pointer>();
1101
1102 Result.atIndex(Idx: 0).deref<Floating>() = Arg1;
1103 Result.atIndex(Idx: 0).initialize();
1104 Result.atIndex(Idx: 1).deref<Floating>() = Arg2;
1105 Result.atIndex(Idx: 1).initialize();
1106 Result.initialize();
1107
1108 return true;
1109}
1110
1111/// __builtin_is_aligned()
1112/// __builtin_align_up()
1113/// __builtin_align_down()
1114/// The first parameter is either an integer or a pointer.
1115/// The second parameter is the requested alignment as an integer.
1116static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
1117 const InterpFrame *Frame,
1118 const CallExpr *Call,
1119 unsigned BuiltinOp) {
1120 PrimType AlignmentT = *S.Ctx.classify(E: Call->getArg(Arg: 1));
1121 const APSInt &Alignment = popToAPSInt(Stk&: S.Stk, T: AlignmentT);
1122
1123 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1124 S.FFDiag(E: Call, DiagId: diag::note_constexpr_invalid_alignment) << Alignment;
1125 return false;
1126 }
1127 unsigned SrcWidth = S.getASTContext().getIntWidth(T: Call->getArg(Arg: 0)->getType());
1128 APSInt MaxValue(APInt::getOneBitSet(numBits: SrcWidth, BitNo: SrcWidth - 1));
1129 if (APSInt::compareValues(I1: Alignment, I2: MaxValue) > 0) {
1130 S.FFDiag(E: Call, DiagId: diag::note_constexpr_alignment_too_big)
1131 << MaxValue << Call->getArg(Arg: 0)->getType() << Alignment;
1132 return false;
1133 }
1134
1135 // The first parameter is either an integer or a pointer (but not a function
1136 // pointer).
1137 PrimType FirstArgT = *S.Ctx.classify(E: Call->getArg(Arg: 0));
1138
1139 if (isIntegralType(T: FirstArgT)) {
1140 const APSInt &Src = popToAPSInt(Stk&: S.Stk, T: FirstArgT);
1141 APInt AlignMinusOne = Alignment.extOrTrunc(width: Src.getBitWidth()) - 1;
1142 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1143 APSInt AlignedVal =
1144 APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
1145 pushInteger(S, Val: AlignedVal, QT: Call->getType());
1146 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1147 APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
1148 pushInteger(S, Val: AlignedVal, QT: Call->getType());
1149 } else {
1150 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1151 S.Stk.push<Boolean>(Args: (Src & AlignMinusOne) == 0);
1152 }
1153 return true;
1154 }
1155
1156 assert(FirstArgT == PT_Ptr);
1157 const Pointer &Ptr = S.Stk.pop<Pointer>();
1158
1159 unsigned PtrOffset = Ptr.getByteOffset();
1160 PtrOffset = Ptr.getIndex();
1161 CharUnits BaseAlignment =
1162 S.getASTContext().getDeclAlign(D: Ptr.getDeclDesc()->asValueDecl());
1163 CharUnits PtrAlign =
1164 BaseAlignment.alignmentAtOffset(offset: CharUnits::fromQuantity(Quantity: PtrOffset));
1165
1166 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1167 if (PtrAlign.getQuantity() >= Alignment) {
1168 S.Stk.push<Boolean>(Args: true);
1169 return true;
1170 }
1171 // If the alignment is not known to be sufficient, some cases could still
1172 // be aligned at run time. However, if the requested alignment is less or
1173 // equal to the base alignment and the offset is not aligned, we know that
1174 // the run-time value can never be aligned.
1175 if (BaseAlignment.getQuantity() >= Alignment &&
1176 PtrAlign.getQuantity() < Alignment) {
1177 S.Stk.push<Boolean>(Args: false);
1178 return true;
1179 }
1180
1181 S.FFDiag(E: Call->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_compute)
1182 << Alignment;
1183 return false;
1184 }
1185
1186 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1187 BuiltinOp == Builtin::BI__builtin_align_up);
1188
1189 // For align_up/align_down, we can return the same value if the alignment
1190 // is known to be greater or equal to the requested value.
1191 if (PtrAlign.getQuantity() >= Alignment) {
1192 S.Stk.push<Pointer>(Args: Ptr);
1193 return true;
1194 }
1195
1196 // The alignment could be greater than the minimum at run-time, so we cannot
1197 // infer much about the resulting pointer value. One case is possible:
1198 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1199 // can infer the correct index if the requested alignment is smaller than
1200 // the base alignment so we can perform the computation on the offset.
1201 if (BaseAlignment.getQuantity() >= Alignment) {
1202 assert(Alignment.getBitWidth() <= 64 &&
1203 "Cannot handle > 64-bit address-space");
1204 uint64_t Alignment64 = Alignment.getZExtValue();
1205 CharUnits NewOffset =
1206 CharUnits::fromQuantity(Quantity: BuiltinOp == Builtin::BI__builtin_align_down
1207 ? llvm::alignDown(Value: PtrOffset, Align: Alignment64)
1208 : llvm::alignTo(Value: PtrOffset, Align: Alignment64));
1209
1210 S.Stk.push<Pointer>(Args: Ptr.atIndex(Idx: NewOffset.getQuantity()));
1211 return true;
1212 }
1213
1214 // Otherwise, we cannot constant-evaluate the result.
1215 S.FFDiag(E: Call->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_adjust) << Alignment;
1216 return false;
1217}
1218
/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
/// Evaluates to Ptr; fails the evaluation when the asserted alignment cannot
/// be shown to hold at compile time.
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Pop in reverse push order: optional extra offset, alignment, pointer.
  std::optional<APSInt> ExtraOffset;
  if (Call->getNumArgs() == 3)
    ExtraOffset = popToAPSInt(Stk&: S.Stk, T: *S.Ctx.classify(E: Call->getArg(Arg: 2)));

  APSInt Alignment = popToAPSInt(Stk&: S.Stk, T: *S.Ctx.classify(E: Call->getArg(Arg: 1)));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  CharUnits Align = CharUnits::fromQuantity(Quantity: Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(D: VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(Ctx: S.getASTContext(), E, ExprKind: UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(E: Call->getArg(Arg: 0),
                DiagId: diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  // Check the offset: (Ptr - ExtraOffset) itself must be aligned.
  APValue AV = Ptr.toAPValue(ASTCtx: S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(Quantity: ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(E: Call->getArg(Arg: 0),
                DiagId: diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(E: Call->getArg(Arg: 0),
                DiagId: diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  // The assumption holds; the builtin's value is the pointer itself.
  S.Stk.push<Pointer>(Args: Ptr);
  return true;
}
1269
1270static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
1271 const InterpFrame *Frame,
1272 const CallExpr *Call) {
1273 if (Call->getNumArgs() != 2 || !Call->getArg(Arg: 0)->getType()->isIntegerType() ||
1274 !Call->getArg(Arg: 1)->getType()->isIntegerType())
1275 return false;
1276
1277 PrimType ValT = *S.Ctx.classify(E: Call->getArg(Arg: 0));
1278 PrimType IndexT = *S.Ctx.classify(E: Call->getArg(Arg: 1));
1279 APSInt Index = popToAPSInt(Stk&: S.Stk, T: IndexT);
1280 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ValT);
1281
1282 unsigned BitWidth = Val.getBitWidth();
1283 uint64_t Shift = Index.extractBitsAsZExtValue(numBits: 8, bitPosition: 0);
1284 uint64_t Length = Index.extractBitsAsZExtValue(numBits: 8, bitPosition: 8);
1285 Length = Length > BitWidth ? BitWidth : Length;
1286
1287 // Handle out of bounds cases.
1288 if (Length == 0 || Shift >= BitWidth) {
1289 pushInteger(S, Val: 0, QT: Call->getType());
1290 return true;
1291 }
1292
1293 uint64_t Result = Val.getZExtValue() >> Shift;
1294 Result &= llvm::maskTrailingOnes<uint64_t>(N: Length);
1295 pushInteger(S, Val: Result, QT: Call->getType());
1296 return true;
1297}
1298
1299static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
1300 const InterpFrame *Frame,
1301 const CallExpr *Call) {
1302 QualType CallType = Call->getType();
1303 if (Call->getNumArgs() != 2 || !Call->getArg(Arg: 0)->getType()->isIntegerType() ||
1304 !Call->getArg(Arg: 1)->getType()->isIntegerType() ||
1305 !CallType->isIntegerType())
1306 return false;
1307
1308 PrimType ValT = *S.Ctx.classify(E: Call->getArg(Arg: 0));
1309 PrimType IndexT = *S.Ctx.classify(E: Call->getArg(Arg: 1));
1310
1311 APSInt Idx = popToAPSInt(Stk&: S.Stk, T: IndexT);
1312 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ValT);
1313
1314 unsigned BitWidth = Val.getBitWidth();
1315 uint64_t Index = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0);
1316
1317 if (Index < BitWidth)
1318 Val.clearHighBits(hiBits: BitWidth - Index);
1319
1320 pushInteger(S, Val, QT: CallType);
1321 return true;
1322}
1323
1324static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
1325 const InterpFrame *Frame,
1326 const CallExpr *Call) {
1327 QualType CallType = Call->getType();
1328 if (!CallType->isIntegerType() ||
1329 !Call->getArg(Arg: 0)->getType()->isIntegerType())
1330 return false;
1331
1332 APSInt Val = popToAPSInt(Stk&: S.Stk, T: *S.Ctx.classify(E: Call->getArg(Arg: 0)));
1333 pushInteger(S, Val: Val.countLeadingZeros(), QT: CallType);
1334 return true;
1335}
1336
1337static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
1338 const InterpFrame *Frame,
1339 const CallExpr *Call) {
1340 QualType CallType = Call->getType();
1341 if (!CallType->isIntegerType() ||
1342 !Call->getArg(Arg: 0)->getType()->isIntegerType())
1343 return false;
1344
1345 APSInt Val = popToAPSInt(Stk&: S.Stk, T: *S.Ctx.classify(E: Call->getArg(Arg: 0)));
1346 pushInteger(S, Val: Val.countTrailingZeros(), QT: CallType);
1347 return true;
1348}
1349
1350static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
1351 const InterpFrame *Frame,
1352 const CallExpr *Call) {
1353 if (Call->getNumArgs() != 2 || !Call->getArg(Arg: 0)->getType()->isIntegerType() ||
1354 !Call->getArg(Arg: 1)->getType()->isIntegerType())
1355 return false;
1356
1357 PrimType ValT = *S.Ctx.classify(E: Call->getArg(Arg: 0));
1358 PrimType MaskT = *S.Ctx.classify(E: Call->getArg(Arg: 1));
1359
1360 APSInt Mask = popToAPSInt(Stk&: S.Stk, T: MaskT);
1361 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ValT);
1362
1363 unsigned BitWidth = Val.getBitWidth();
1364 APInt Result = APInt::getZero(numBits: BitWidth);
1365 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1366 if (Mask[I])
1367 Result.setBitVal(BitPosition: I, BitValue: Val[P++]);
1368 }
1369 pushInteger(S, Val: std::move(Result), QT: Call->getType());
1370 return true;
1371}
1372
1373static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
1374 const InterpFrame *Frame,
1375 const CallExpr *Call) {
1376 if (Call->getNumArgs() != 2 || !Call->getArg(Arg: 0)->getType()->isIntegerType() ||
1377 !Call->getArg(Arg: 1)->getType()->isIntegerType())
1378 return false;
1379
1380 PrimType ValT = *S.Ctx.classify(E: Call->getArg(Arg: 0));
1381 PrimType MaskT = *S.Ctx.classify(E: Call->getArg(Arg: 1));
1382
1383 APSInt Mask = popToAPSInt(Stk&: S.Stk, T: MaskT);
1384 APSInt Val = popToAPSInt(Stk&: S.Stk, T: ValT);
1385
1386 unsigned BitWidth = Val.getBitWidth();
1387 APInt Result = APInt::getZero(numBits: BitWidth);
1388 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1389 if (Mask[I])
1390 Result.setBitVal(BitPosition: P++, BitValue: Val[I]);
1391 }
1392 pushInteger(S, Val: std::move(Result), QT: Call->getType());
1393 return true;
1394}
1395
/// (CarryIn, LHS, RHS, Result)
/// Implements _addcarry_u32/u64 and _subborrow_u32/u64: the truncated sum or
/// difference is stored through the fourth (pointer) argument; the call's
/// return value is the carry/borrow-out bit.
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call,
                                                    unsigned BuiltinOp) {
  if (Call->getNumArgs() != 4 || !Call->getArg(Arg: 0)->getType()->isIntegerType() ||
      !Call->getArg(Arg: 1)->getType()->isIntegerType() ||
      !Call->getArg(Arg: 2)->getType()->isIntegerType())
    return false;

  // NB: despite the name, this pointer receives the arithmetic result; the
  // carry-out bit is pushed as the call's return value below.
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();

  PrimType CarryInT = *S.getContext().classify(E: Call->getArg(Arg: 0));
  PrimType LHST = *S.getContext().classify(E: Call->getArg(Arg: 1));
  PrimType RHST = *S.getContext().classify(E: Call->getArg(Arg: 2));
  APSInt RHS = popToAPSInt(Stk&: S.Stk, T: RHST);
  APSInt LHS = popToAPSInt(Stk&: S.Stk, T: LHST);
  APSInt CarryIn = popToAPSInt(Stk&: S.Stk, T: CarryInT);

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  // Compute in BitWidth+1 bits so the carry/borrow lands in the top bit.
  // Any non-zero carry-in counts as 1.
  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(RHS: 0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(width: BitWidth + 1) + (RHS.zext(width: BitWidth + 1) + CarryInBit))
            : (LHS.zext(width: BitWidth + 1) - (RHS.zext(width: BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(numBits: BitWidth, bitPosition: 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(numBits: 1, bitPosition: BitWidth), /*IsUnsigned=*/true);

  // Store the truncated result through the pointer argument ...
  QualType CarryOutType = Call->getArg(Arg: 3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(T: CarryOutType);
  assignInteger(S, Dest: CarryOutPtr, ValueT: CarryOutT, Value: APSInt(std::move(Result), true));

  // ... and return the carry/borrow flag.
  pushInteger(S, Val: CarryOut, QT: Call->getType());

  return true;
}
1437
/// Implements __builtin_os_log_format_buffer_size: computes the os_log
/// buffer layout for this call and returns its size in bytes.
static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx&: S.getASTContext(), E: Call, layout&: Layout);
  pushInteger(S, Val: Layout.size().getQuantity(), QT: Call->getType());
  return true;
}
1447
/// Implements __builtin_ptrauth_string_discriminator: hashes the argument
/// string (excluding the terminating NUL) with the stable SipHash used for
/// pointer-authentication discriminators.
static bool
interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const auto &Ptr = S.Stk.pop<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  // This should be created for a StringLiteral, so it should always hold at
  // least one array element (the terminating NUL, dropped via `- 1` below).
  assert(Ptr.getFieldDesc()->getNumElems() >= 1);
  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(S: R);
  pushInteger(S, Val: Result, QT: Call->getType());
  return true;
}
1463
1464static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1465 const InterpFrame *Frame,
1466 const CallExpr *Call) {
1467 // A call to __operator_new is only valid within std::allocate<>::allocate.
1468 // Walk up the call stack to find the appropriate caller and get the
1469 // element type from it.
1470 auto [NewCall, ElemType] = S.getStdAllocatorCaller(Name: "allocate");
1471
1472 if (ElemType.isNull()) {
1473 S.FFDiag(E: Call, DiagId: S.getLangOpts().CPlusPlus20
1474 ? diag::note_constexpr_new_untyped
1475 : diag::note_constexpr_new);
1476 return false;
1477 }
1478 assert(NewCall);
1479
1480 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1481 S.FFDiag(E: Call, DiagId: diag::note_constexpr_new_not_complete_object_type)
1482 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1483 return false;
1484 }
1485
1486 // We only care about the first parameter (the size), so discard all the
1487 // others.
1488 {
1489 unsigned NumArgs = Call->getNumArgs();
1490 assert(NumArgs >= 1);
1491
1492 // The std::nothrow_t arg never gets put on the stack.
1493 if (Call->getArg(Arg: NumArgs - 1)->getType()->isNothrowT())
1494 --NumArgs;
1495 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
1496 // First arg is needed.
1497 Args = Args.drop_front();
1498
1499 // Discard the rest.
1500 for (const Expr *Arg : Args)
1501 discard(Stk&: S.Stk, T: *S.getContext().classify(E: Arg));
1502 }
1503
1504 APSInt Bytes = popToAPSInt(Stk&: S.Stk, T: *S.getContext().classify(E: Call->getArg(Arg: 0)));
1505 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(T: ElemType);
1506 assert(!ElemSize.isZero());
1507 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1508 // elements we should allocate.
1509 APInt NumElems, Remainder;
1510 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1511 APInt::udivrem(LHS: Bytes, RHS: ElemSizeAP, Quotient&: NumElems, Remainder);
1512 if (Remainder != 0) {
1513 // This likely indicates a bug in the implementation of 'std::allocator'.
1514 S.FFDiag(E: Call, DiagId: diag::note_constexpr_operator_new_bad_size)
1515 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1516 return false;
1517 }
1518
1519 // NB: The same check we're using in CheckArraySize()
1520 if (NumElems.getActiveBits() >
1521 ConstantArrayType::getMaxSizeBits(Context: S.getASTContext()) ||
1522 NumElems.ugt(RHS: Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1523 // FIXME: NoThrow check?
1524 const SourceInfo &Loc = S.Current->getSource(PC: OpPC);
1525 S.FFDiag(SI: Loc, DiagId: diag::note_constexpr_new_too_large)
1526 << NumElems.getZExtValue();
1527 return false;
1528 }
1529
1530 if (!CheckArraySize(S, OpPC, NumElems: NumElems.getZExtValue()))
1531 return false;
1532
1533 bool IsArray = NumElems.ugt(RHS: 1);
1534 std::optional<PrimType> ElemT = S.getContext().classify(T: ElemType);
1535 DynamicAllocator &Allocator = S.getAllocator();
1536 if (ElemT) {
1537 Block *B =
1538 Allocator.allocate(Source: NewCall, T: *ElemT, NumElements: NumElems.getZExtValue(),
1539 EvalID: S.Ctx.getEvalID(), AllocForm: DynamicAllocator::Form::Operator);
1540 assert(B);
1541 S.Stk.push<Pointer>(Args: Pointer(B).atIndex(Idx: 0));
1542 return true;
1543 }
1544
1545 assert(!ElemT);
1546
1547 // Composite arrays
1548 if (IsArray) {
1549 const Descriptor *Desc =
1550 S.P.createDescriptor(D: NewCall, Ty: ElemType.getTypePtr(),
1551 MDSize: IsArray ? std::nullopt : Descriptor::InlineDescMD);
1552 Block *B =
1553 Allocator.allocate(D: Desc, NumElements: NumElems.getZExtValue(), EvalID: S.Ctx.getEvalID(),
1554 AllocForm: DynamicAllocator::Form::Operator);
1555 assert(B);
1556 S.Stk.push<Pointer>(Args: Pointer(B).atIndex(Idx: 0));
1557 return true;
1558 }
1559
1560 // Records. Still allocate them as single-element arrays.
1561 QualType AllocType = S.getASTContext().getConstantArrayType(
1562 EltTy: ElemType, ArySize: NumElems, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
1563
1564 const Descriptor *Desc =
1565 S.P.createDescriptor(D: NewCall, Ty: AllocType.getTypePtr(),
1566 MDSize: IsArray ? std::nullopt : Descriptor::InlineDescMD);
1567 Block *B = Allocator.allocate(D: Desc, EvalID: S.getContext().getEvalID(),
1568 AllocForm: DynamicAllocator::Form::Operator);
1569 assert(B);
1570 S.Stk.push<Pointer>(Args: Pointer(B).atIndex(Idx: 0).narrow());
1571 return true;
1572}
1573
1574static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1575 const InterpFrame *Frame,
1576 const CallExpr *Call) {
1577 const Expr *Source = nullptr;
1578 const Block *BlockToDelete = nullptr;
1579
1580 if (S.checkingPotentialConstantExpression()) {
1581 S.Stk.discard<Pointer>();
1582 return false;
1583 }
1584
1585 // This is permitted only within a call to std::allocator<T>::deallocate.
1586 if (!S.getStdAllocatorCaller(Name: "deallocate")) {
1587 S.FFDiag(E: Call);
1588 S.Stk.discard<Pointer>();
1589 return true;
1590 }
1591
1592 {
1593 const Pointer &Ptr = S.Stk.pop<Pointer>();
1594
1595 if (Ptr.isZero()) {
1596 S.CCEDiag(E: Call, DiagId: diag::note_constexpr_deallocate_null);
1597 return true;
1598 }
1599
1600 Source = Ptr.getDeclDesc()->asExpr();
1601 BlockToDelete = Ptr.block();
1602
1603 if (!BlockToDelete->isDynamic()) {
1604 S.FFDiag(E: Call, DiagId: diag::note_constexpr_delete_not_heap_alloc)
1605 << Ptr.toDiagnosticString(Ctx: S.getASTContext());
1606 if (const auto *D = Ptr.getFieldDesc()->asDecl())
1607 S.Note(Loc: D->getLocation(), DiagId: diag::note_declared_at);
1608 }
1609 }
1610 assert(BlockToDelete);
1611
1612 DynamicAllocator &Allocator = S.getAllocator();
1613 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1614 std::optional<DynamicAllocator::Form> AllocForm =
1615 Allocator.getAllocationForm(Source);
1616
1617 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1618 // Nothing has been deallocated, this must be a double-delete.
1619 const SourceInfo &Loc = S.Current->getSource(PC: OpPC);
1620 S.FFDiag(SI: Loc, DiagId: diag::note_constexpr_double_delete);
1621 return false;
1622 }
1623 assert(AllocForm);
1624
1625 return CheckNewDeleteForms(
1626 S, OpPC, AllocForm: *AllocForm, DeleteForm: DynamicAllocator::Form::Operator, D: BlockDesc, NewExpr: Source);
1627}
1628
/// Implements __builtin_arithmetic_fence: for constant evaluation this is
/// the identity function on its floating-point operand.
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(Args: Arg0);
  return true;
}
1636
/// Implements the integer __builtin_reduce_* builtins: folds all elements of
/// the vector argument into a single scalar of the element type using the
/// operation selected by \p ID. Add/mul overflow fails the evaluation.
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call, unsigned ID) {
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(T: ElemType);
  unsigned NumElems = Arg.getNumElems();

  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
    // Seed the accumulator with element 0, then fold in the rest.
    T Result = Arg.atIndex(0).deref<T>();
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.atIndex(I).deref<T>();
      // Keep the pre-fold value so overflow can be diagnosed exactly.
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          // One extra bit is enough to represent any single-step sum.
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          // A product may need up to twice the bits to represent.
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_min) {
        if (Elem < Result)
          Result = Elem;
      } else if (ID == Builtin::BI__builtin_reduce_max) {
        if (Elem > Result)
          Result = Elem;
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
1692
/// Can be called with an integer or vector as the first and only parameter.
/// For BI__builtin_elementwise_popcount the population count is computed;
/// any other ID dispatched here reverses the bits (presumably
/// __builtin_elementwise_bitreverse -- confirm against the dispatcher).
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call,
                                                 unsigned BuiltinID) {
  assert(Call->getNumArgs() == 1);
  if (Call->getArg(Arg: 0)->getType()->isIntegerType()) {
    // Scalar case: pop the integer and push the transformed value.
    PrimType ArgT = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType());
    APSInt Val = popToAPSInt(Stk&: S.Stk, T: ArgT);

    if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
      pushInteger(S, Val: Val.popcount(), QT: Call->getType());
    } else {
      pushInteger(S, Val: Val.reverseBits(), QT: Call->getType());
    }
    return true;
  }
  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(T: ElemType);
  unsigned NumElems = Arg.getNumElems();

  // Transform each source element into the corresponding slot of the
  // result vector already on the stack.
  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
        Dst.atIndex(I).deref<T>() =
            T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
      } else {
        Dst.atIndex(I).deref<T>() = T::from(
            Arg.atIndex(I).deref<T>().toAPSInt().reverseBits().getZExtValue());
      }
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}
1739
/// Implements the constexpr evaluation of memcpy/memmove and their wide-char
/// variants (both the __builtin_ forms and the plain libc names).
/// Pops the size, source and destination pointers from the stack; on success
/// pushes the destination pointer as the call's result.
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  const ASTContext &ASTCtx = S.getASTContext();
  PrimType SizeT = *S.getContext().classify(E: Call->getArg(Arg: 2));
  APSInt Size = popToAPSInt(Stk&: S.Stk, T: SizeT);
  const Pointer SrcPtr = S.Stk.pop<Pointer>();
  const Pointer DestPtr = S.Stk.pop<Pointer>();

  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");

  // The non-__builtin_ libc names are only constant-evaluatable as an
  // extension; emit the corresponding note.
  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Move: overlapping regions are allowed (memmove semantics).
  // WChar: the size argument is measured in wide characters, not bytes.
  bool Move =
      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
               ID == Builtin::BI__builtin_wmemcpy ||
               ID == Builtin::BI__builtin_wmemmove;

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size.isZero()) {
    S.Stk.push<Pointer>(Args: DestPtr);
    return true;
  }

  // Copying from/to a null pointer is not a constant expression.
  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(Ctx: ASTCtx);
    return false;
  }

  // Diagnose integral src/dest pointers specially.
  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
    std::string DiagVal = "(void *)";
    DiagVal += SrcPtr.isIntegralPointer()
                   ? std::to_string(val: SrcPtr.getIntegerRepresentation())
                   : std::to_string(val: DestPtr.getIntegerRepresentation());
    S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_memcpy_null)
        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
    return false;
  }

  // Can't read from dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  // Number of elements still addressable from the destination's current
  // index; a non-array destination counts as a single element.
  QualType DestElemType = getElemType(P: DestPtr);
  size_t RemainingDestElems;
  if (DestPtr.getFieldDesc()->isArray()) {
    RemainingDestElems = DestPtr.isUnknownSizeArray()
                             ? 0
                             : (DestPtr.getNumElems() - DestPtr.getIndex());
  } else {
    RemainingDestElems = 1;
  }
  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(T: DestElemType).getQuantity();

  // For the wide variants, convert the wide-character count to bytes.
  if (WChar) {
    uint64_t WCharSize =
        ASTCtx.getTypeSizeInChars(T: ASTCtx.getWCharType()).getQuantity();
    Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
                   /*IsUnsigned=*/true);
  }

  // We can only copy a whole number of destination elements.
  if (Size.urem(RHS: DestElemSize) != 0) {
    S.FFDiag(SI: S.Current->getSource(PC: OpPC),
             DiagId: diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
    return false;
  }

  // Same computation as above, for the source side.
  QualType SrcElemType = getElemType(P: SrcPtr);
  size_t RemainingSrcElems;
  if (SrcPtr.getFieldDesc()->isArray()) {
    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
                            ? 0
                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
  } else {
    RemainingSrcElems = 1;
  }
  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(T: SrcElemType).getQuantity();

  // Copying between differing element types would be type punning.
  if (!ASTCtx.hasSameUnqualifiedType(T1: DestElemType, T2: SrcElemType)) {
    S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_memcpy_type_pun)
        << Move << SrcElemType << DestElemType;
    return false;
  }

  if (DestElemType->isIncompleteType() ||
      DestPtr.getType()->isIncompleteType()) {
    QualType DiagType =
        DestElemType->isIncompleteType() ? DestElemType : DestPtr.getType();
    S.FFDiag(SI: S.Current->getSource(PC: OpPC),
             DiagId: diag::note_constexpr_memcpy_incomplete_type)
        << Move << DiagType;
    return false;
  }

  // memcpy of non-trivially-copyable types is not supported.
  if (!DestElemType.isTriviallyCopyableType(Context: ASTCtx)) {
    S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_memcpy_nontrivial)
        << Move << DestElemType;
    return false;
  }

  // Check if we have enough elements to read from and write to.
  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
  if (Size.ugt(RHS: RemainingDestBytes) || Size.ugt(RHS: RemainingSrcBytes)) {
    APInt N = Size.udiv(RHS: DestElemSize);
    S.FFDiag(SI: S.Current->getSource(PC: OpPC),
             DiagId: diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << (Size.ugt(RHS: RemainingSrcBytes) ? 1 : 2)
        << DestElemType << toString(I: N, Radix: 10, /*Signed=*/false);
    return false;
  }

  // Check for overlapping memory regions. Only memcpy (not memmove)
  // forbids overlap, and only within the same allocation.
  if (!Move && Pointer::pointToSameBlock(A: SrcPtr, B: DestPtr)) {
    // Remove base casts.
    Pointer SrcP = SrcPtr;
    while (SrcP.isBaseClass())
      SrcP = SrcP.getBase();

    Pointer DestP = DestPtr;
    while (DestP.isBaseClass())
      DestP = DestP.getBase();

    // Compare byte offsets within the common block.
    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
    unsigned N = Size.getZExtValue();

    if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
      S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  assert(Size.getZExtValue() % DestElemSize == 0);
  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Size: Bytes(Size.getZExtValue()).toBits()))
    return false;

  S.Stk.push<Pointer>(Args: DestPtr);
  return true;
}
1891
1892/// Determine if T is a character type for which we guarantee that
1893/// sizeof(T) == 1.
1894static bool isOneByteCharacterType(QualType T) {
1895 return T->isCharType() || T->isChar8Type();
1896}
1897
/// Implements memcmp/bcmp/wmemcmp (and their __builtin_ forms) by
/// bit-casting both operands into byte buffers and comparing those.
/// Pushes a negative/zero/positive integer result, libc-style.
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  PrimType SizeT = *S.getContext().classify(E: Call->getArg(Arg: 2));
  const APSInt &Size = popToAPSInt(Stk&: S.Stk, T: SizeT);
  const Pointer &PtrB = S.Stk.pop<Pointer>();
  const Pointer &PtrA = S.Stk.pop<Pointer>();

  // The non-__builtin_ libc names are only constant-evaluatable as an
  // extension; emit the corresponding note.
  if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
      ID == Builtin::BIwmemcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Comparing zero bytes/characters always yields equality.
  if (Size.isZero()) {
    pushInteger(S, Val: 0, QT: Call->getType());
    return true;
  }

  // Wide variants compare wchar_t units instead of bytes.
  bool IsWide =
      (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);

  const ASTContext &ASTCtx = S.getASTContext();
  QualType ElemTypeA = getElemType(P: PtrA);
  QualType ElemTypeB = getElemType(P: PtrB);
  // FIXME: This is an arbitrary limitation the current constant interpreter
  // had. We could remove this.
  if (!IsWide && (!isOneByteCharacterType(T: ElemTypeA) ||
                  !isOneByteCharacterType(T: ElemTypeB))) {
    S.FFDiag(SI: S.Current->getSource(PC: OpPC),
             DiagId: diag::note_constexpr_memcmp_unsupported)
        << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
        << PtrB.getType();
    return false;
  }

  if (PtrA.isDummy() || PtrB.isDummy())
    return false;

  // Now, read both pointers to a buffer and compare those.
  BitcastBuffer BufferA(
      Bits(ASTCtx.getTypeSize(T: ElemTypeA) * PtrA.getNumElems()));
  readPointerToBuffer(Ctx: S.getContext(), FromPtr: PtrA, Buffer&: BufferA, ReturnOnUninit: false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(M: BufferA.Data.get(), N: BufferA.byteSize().getQuantity());

  BitcastBuffer BufferB(
      Bits(ASTCtx.getTypeSize(T: ElemTypeB) * PtrB.getNumElems()));
  readPointerToBuffer(Ctx: S.getContext(), FromPtr: PtrB, Buffer&: BufferB, ReturnOnUninit: false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(M: BufferB.Data.get(), N: BufferB.byteSize().getQuantity());

  // Never compare more bytes than the smaller of the two buffers holds.
  size_t MinBufferSize = std::min(a: BufferA.byteSize().getQuantity(),
                                  b: BufferB.byteSize().getQuantity());

  unsigned ElemSize = 1;
  if (IsWide)
    ElemSize = ASTCtx.getTypeSizeInChars(T: ASTCtx.getWCharType()).getQuantity();
  // The Size given for the wide variants is in wide-char units. Convert it
  // to bytes.
  size_t ByteSize = Size.getZExtValue() * ElemSize;
  size_t CmpSize = std::min(a: MinBufferSize, b: ByteSize);

  // Compare element by element: whole wchar_t values for the wide
  // variants, raw bytes otherwise.
  for (size_t I = 0; I != CmpSize; I += ElemSize) {
    if (IsWide) {
      INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
        T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
        T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
        if (A < B) {
          pushInteger(S, -1, Call->getType());
          return true;
        } else if (A > B) {
          pushInteger(S, 1, Call->getType());
          return true;
        }
      });
    } else {
      std::byte A = BufferA.Data[I];
      std::byte B = BufferB.Data[I];

      if (A < B) {
        pushInteger(S, Val: -1, QT: Call->getType());
        return true;
      } else if (A > B) {
        pushInteger(S, Val: 1, QT: Call->getType());
        return true;
      }
    }
  }

  // We compared CmpSize bytes above. If the limiting factor was the Size
  // passed, we're done and the result is equality (0).
  if (ByteSize <= CmpSize) {
    pushInteger(S, Val: 0, QT: Call->getType());
    return true;
  }

  // However, if we read all the available bytes but were instructed to read
  // even more, diagnose this as a "read of dereferenced one-past-the-end
  // pointer". This is what would happen if we called CheckLoad() on every array
  // element.
  S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_access_past_end)
      << AK_Read << S.Current->getRange(PC: OpPC);
  return false;
}
2006
// __builtin_memchr(ptr, int, int)
// __builtin_strchr(ptr, int)
/// Implements the memchr/strchr/wmemchr/wcschr families: searches the
/// pointed-to memory for the given value and pushes a pointer to the found
/// element, or a null pointer if it isn't found.
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
                                   const CallExpr *Call, unsigned ID) {
  // The non-__builtin_ libc names are only constant-evaluatable as an
  // extension; emit the corresponding note.
  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Only the three-argument (mem*) variants carry a maximum length.
  std::optional<APSInt> MaxLength;
  PrimType DesiredT = *S.getContext().classify(E: Call->getArg(Arg: 1));
  if (Call->getNumArgs() == 3) {
    PrimType MaxT = *S.getContext().classify(E: Call->getArg(Arg: 2));
    MaxLength = popToAPSInt(Stk&: S.Stk, T: MaxT);
  }
  APSInt Desired = popToAPSInt(Stk&: S.Stk, T: DesiredT);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  // Searching zero elements always yields "not found" (a null pointer).
  if (MaxLength && MaxLength->isZero()) {
    S.Stk.push<Pointer>();
    return true;
  }

  if (Ptr.isDummy())
    return false;

  // Null is only okay if the given size is 0.
  if (Ptr.isZero()) {
    S.FFDiag(SI: S.Current->getSource(PC: OpPC), DiagId: diag::note_constexpr_access_null)
        << AK_Read;
    return false;
  }

  QualType ElemTy = Ptr.getFieldDesc()->isArray()
                        ? Ptr.getFieldDesc()->getElemQualType()
                        : Ptr.getFieldDesc()->getType();
  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;

  // Give up on byte-oriented matching against multibyte elements.
  if (IsRawByte && !isOneByteCharacterType(T: ElemTy)) {
    S.FFDiag(SI: S.Current->getSource(PC: OpPC),
             DiagId: diag::note_constexpr_memchr_unsupported)
        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
    return false;
  }

  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
    // strchr compares directly to the passed integer, and therefore
    // always fails if given an int that is not a char.
    if (Desired !=
        Desired.trunc(width: S.getASTContext().getCharWidth()).getSExtValue()) {
      S.Stk.push<Pointer>();
      return true;
    }
  }

  uint64_t DesiredVal;
  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
    DesiredVal = Desired.getZExtValue();
  } else {
    DesiredVal = Desired.trunc(width: S.getASTContext().getCharWidth()).getZExtValue();
  }

  // The string (str/wcs) variants stop searching at a zero value.
  bool StopAtZero =
      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);

  // Raw-byte searches read the storage as signed bytes regardless of the
  // declared element type.
  PrimType ElemT =
      IsRawByte ? PT_Sint8 : *S.getContext().classify(T: getElemType(P: Ptr));

  // Walk element by element, validating each load so out-of-bounds reads
  // are diagnosed like ordinary array accesses.
  size_t Index = Ptr.getIndex();
  size_t Step = 0;
  for (;;) {
    const Pointer &ElemPtr =
        (Index + Step) > 0 ? Ptr.atIndex(Idx: Index + Step) : Ptr;

    if (!CheckLoad(S, OpPC, Ptr: ElemPtr))
      return false;

    uint64_t V;
    INT_TYPE_SWITCH_NO_BOOL(
        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });

    if (V == DesiredVal) {
      S.Stk.push<Pointer>(Args: ElemPtr);
      return true;
    }

    if (StopAtZero && V == 0)
      break;

    ++Step;
    if (MaxLength && Step == MaxLength->getZExtValue())
      break;
  }

  // Not found: the result is a null pointer.
  S.Stk.push<Pointer>();
  return true;
}
2107
2108static unsigned computeFullDescSize(const ASTContext &ASTCtx,
2109 const Descriptor *Desc) {
2110
2111 if (Desc->isPrimitive())
2112 return ASTCtx.getTypeSizeInChars(T: Desc->getType()).getQuantity();
2113
2114 if (Desc->isArray())
2115 return ASTCtx.getTypeSizeInChars(T: Desc->getElemQualType()).getQuantity() *
2116 Desc->getNumElems();
2117
2118 if (Desc->isRecord())
2119 return ASTCtx.getTypeSizeInChars(T: Desc->getType()).getQuantity();
2120
2121 llvm_unreachable("Unhandled descriptor type");
2122 return 0;
2123}
2124
2125static unsigned computePointerOffset(const ASTContext &ASTCtx,
2126 const Pointer &Ptr) {
2127 unsigned Result = 0;
2128
2129 Pointer P = Ptr;
2130 while (P.isArrayElement() || P.isField()) {
2131 P = P.expand();
2132 const Descriptor *D = P.getFieldDesc();
2133
2134 if (P.isArrayElement()) {
2135 unsigned ElemSize =
2136 ASTCtx.getTypeSizeInChars(T: D->getElemQualType()).getQuantity();
2137 if (P.isOnePastEnd())
2138 Result += ElemSize * P.getNumElems();
2139 else
2140 Result += ElemSize * P.getIndex();
2141 P = P.expand().getArray();
2142 } else if (P.isBaseClass()) {
2143
2144 const auto *RD = cast<CXXRecordDecl>(Val: D->asDecl());
2145 bool IsVirtual = Ptr.isVirtualBaseClass();
2146 P = P.getBase();
2147 const Record *BaseRecord = P.getRecord();
2148
2149 const ASTRecordLayout &Layout =
2150 ASTCtx.getASTRecordLayout(D: cast<CXXRecordDecl>(Val: BaseRecord->getDecl()));
2151 if (IsVirtual)
2152 Result += Layout.getVBaseClassOffset(VBase: RD).getQuantity();
2153 else
2154 Result += Layout.getBaseClassOffset(Base: RD).getQuantity();
2155 } else if (P.isField()) {
2156 const FieldDecl *FD = P.getField();
2157 const ASTRecordLayout &Layout =
2158 ASTCtx.getASTRecordLayout(D: FD->getParent());
2159 unsigned FieldIndex = FD->getFieldIndex();
2160 uint64_t FieldOffset =
2161 ASTCtx.toCharUnitsFromBits(BitSize: Layout.getFieldOffset(FieldNo: FieldIndex))
2162 .getQuantity();
2163 Result += FieldOffset;
2164 P = P.getBase();
2165 } else
2166 llvm_unreachable("Unhandled descriptor type");
2167 }
2168
2169 return Result;
2170}
2171
2172static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
2173 const InterpFrame *Frame,
2174 const CallExpr *Call) {
2175 PrimType KindT = *S.getContext().classify(E: Call->getArg(Arg: 1));
2176 [[maybe_unused]] unsigned Kind = popToAPSInt(Stk&: S.Stk, T: KindT).getZExtValue();
2177
2178 assert(Kind <= 3 && "unexpected kind");
2179
2180 const Pointer &Ptr = S.Stk.pop<Pointer>();
2181
2182 if (Ptr.isZero())
2183 return false;
2184
2185 const Descriptor *DeclDesc = Ptr.getDeclDesc();
2186 if (!DeclDesc)
2187 return false;
2188
2189 const ASTContext &ASTCtx = S.getASTContext();
2190
2191 unsigned ByteOffset = computePointerOffset(ASTCtx, Ptr);
2192 unsigned FullSize = computeFullDescSize(ASTCtx, Desc: DeclDesc);
2193
2194 pushInteger(S, Val: FullSize - ByteOffset, QT: Call->getType());
2195
2196 return true;
2197}
2198
/// Implements __builtin_is_within_lifetime (and std::is_within_lifetime,
/// which forwards to it). Pops the pointer argument and pushes a boolean
/// that is true iff the pointed-to object is currently within its lifetime.
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
                                               const CallExpr *Call) {

  // Only meaningful during constant evaluation.
  if (!S.inConstantContext())
    return false;

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  // Emits the "invalid use" diagnostic. When we were invoked through the
  // std::is_within_lifetime wrapper, attribute the diagnostic to the
  // wrapper's call site rather than to the builtin call inside it.
  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = S.Current->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr(Str: "is_within_lifetime");
    }
    S.CCEDiag(SI: CalledFromStd
                  ? S.Current->Caller->getSource(PC: S.Current->getRetPC())
                  : S.Current->getSource(PC: OpPC),
              DiagId: diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return false;
  };

  // Null and one-past-the-end pointers are hard errors.
  if (Ptr.isZero())
    return Error(0);
  if (Ptr.isOnePastEnd())
    return Error(1);

  bool Result = Ptr.getLifetime() != Lifetime::Ended;
  if (!Ptr.isActive()) {
    // Inactive objects (e.g. non-active union members) are not within
    // their lifetime.
    Result = false;
  } else {
    if (!CheckLive(S, OpPC, Ptr, AK: AK_Read))
      return false;
    if (!CheckMutable(S, OpPC, Ptr))
      return false;
    if (!CheckDummy(S, OpPC, Ptr, AK: AK_Read))
      return false;
  }

  // Check if we're currently running an initializer.
  for (InterpFrame *Frame = S.Current; Frame; Frame = Frame->Caller) {
    if (const Function *F = Frame->getFunction();
        F && F->isConstructor() && Frame->getThis().block() == Ptr.block()) {
      return Error(2);
    }
  }
  if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
    return Error(2);

  pushInteger(S, Val: Result, QT: Call->getType());
  return true;
}
2254
2255static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
2256 const CallExpr *Call,
2257 unsigned BuiltinID) {
2258 Call->dumpColor();
2259 assert(Call->getNumArgs() == 2);
2260
2261 // Single integer case.
2262 if (!Call->getArg(Arg: 0)->getType()->isVectorType()) {
2263 assert(!Call->getArg(1)->getType()->isVectorType());
2264 APSInt RHS = popToAPSInt(
2265 Stk&: S.Stk, T: *S.getContext().classify(T: Call->getArg(Arg: 1)->getType()));
2266 APSInt LHS = popToAPSInt(
2267 Stk&: S.Stk, T: *S.getContext().classify(T: Call->getArg(Arg: 0)->getType()));
2268 APInt Result;
2269 if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
2270 Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
2271 } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
2272 Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
2273 } else {
2274 llvm_unreachable("Wrong builtin ID");
2275 }
2276
2277 pushInteger(S, Val: APSInt(Result, !LHS.isSigned()), QT: Call->getType());
2278 return true;
2279 }
2280
2281 // Vector case.
2282 assert(Call->getArg(0)->getType()->isVectorType() &&
2283 Call->getArg(1)->getType()->isVectorType());
2284 const auto *VT = Call->getArg(Arg: 0)->getType()->castAs<VectorType>();
2285 assert(VT->getElementType() ==
2286 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2287 assert(VT->getNumElements() ==
2288 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2289 assert(VT->getElementType()->isIntegralOrEnumerationType());
2290
2291 const Pointer &RHS = S.Stk.pop<Pointer>();
2292 const Pointer &LHS = S.Stk.pop<Pointer>();
2293 const Pointer &Dst = S.Stk.peek<Pointer>();
2294 PrimType ElemT = *S.getContext().classify(T: VT->getElementType());
2295 unsigned NumElems = VT->getNumElements();
2296 for (unsigned I = 0; I != NumElems; ++I) {
2297 APSInt Elem1;
2298 APSInt Elem2;
2299 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2300 Elem1 = LHS.atIndex(I).deref<T>().toAPSInt();
2301 Elem2 = RHS.atIndex(I).deref<T>().toAPSInt();
2302 });
2303
2304 APSInt Result;
2305 if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
2306 Result = APSInt(Elem1.isSigned() ? Elem1.sadd_sat(RHS: Elem2)
2307 : Elem1.uadd_sat(RHS: Elem2),
2308 Call->getType()->isUnsignedIntegerOrEnumerationType());
2309 } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
2310 Result = APSInt(Elem1.isSigned() ? Elem1.ssub_sat(RHS: Elem2)
2311 : Elem1.usub_sat(RHS: Elem2),
2312 Call->getType()->isUnsignedIntegerOrEnumerationType());
2313 } else {
2314 llvm_unreachable("Wrong builtin ID");
2315 }
2316
2317 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2318 const Pointer &E = Dst.atIndex(I);
2319 E.deref<T>() = static_cast<T>(Result);
2320 E.initialize();
2321 });
2322 }
2323
2324 return true;
2325}
2326
2327bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
2328 uint32_t BuiltinID) {
2329 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(ID: BuiltinID))
2330 return Invalid(S, OpPC);
2331
2332 const InterpFrame *Frame = S.Current;
2333 switch (BuiltinID) {
2334 case Builtin::BI__builtin_is_constant_evaluated:
2335 return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);
2336
2337 case Builtin::BI__builtin_assume:
2338 case Builtin::BI__assume:
2339 return interp__builtin_assume(S, OpPC, Frame, Call);
2340
2341 case Builtin::BI__builtin_strcmp:
2342 case Builtin::BIstrcmp:
2343 case Builtin::BI__builtin_strncmp:
2344 case Builtin::BIstrncmp:
2345 case Builtin::BI__builtin_wcsncmp:
2346 case Builtin::BIwcsncmp:
2347 case Builtin::BI__builtin_wcscmp:
2348 case Builtin::BIwcscmp:
2349 return interp__builtin_strcmp(S, OpPC, Frame, Call, ID: BuiltinID);
2350
2351 case Builtin::BI__builtin_strlen:
2352 case Builtin::BIstrlen:
2353 case Builtin::BI__builtin_wcslen:
2354 case Builtin::BIwcslen:
2355 return interp__builtin_strlen(S, OpPC, Frame, Call, ID: BuiltinID);
2356
2357 case Builtin::BI__builtin_nan:
2358 case Builtin::BI__builtin_nanf:
2359 case Builtin::BI__builtin_nanl:
2360 case Builtin::BI__builtin_nanf16:
2361 case Builtin::BI__builtin_nanf128:
2362 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
2363
2364 case Builtin::BI__builtin_nans:
2365 case Builtin::BI__builtin_nansf:
2366 case Builtin::BI__builtin_nansl:
2367 case Builtin::BI__builtin_nansf16:
2368 case Builtin::BI__builtin_nansf128:
2369 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
2370
2371 case Builtin::BI__builtin_huge_val:
2372 case Builtin::BI__builtin_huge_valf:
2373 case Builtin::BI__builtin_huge_vall:
2374 case Builtin::BI__builtin_huge_valf16:
2375 case Builtin::BI__builtin_huge_valf128:
2376 case Builtin::BI__builtin_inf:
2377 case Builtin::BI__builtin_inff:
2378 case Builtin::BI__builtin_infl:
2379 case Builtin::BI__builtin_inff16:
2380 case Builtin::BI__builtin_inff128:
2381 return interp__builtin_inf(S, OpPC, Frame, Call);
2382
2383 case Builtin::BI__builtin_copysign:
2384 case Builtin::BI__builtin_copysignf:
2385 case Builtin::BI__builtin_copysignl:
2386 case Builtin::BI__builtin_copysignf128:
2387 return interp__builtin_copysign(S, OpPC, Frame);
2388
2389 case Builtin::BI__builtin_fmin:
2390 case Builtin::BI__builtin_fminf:
2391 case Builtin::BI__builtin_fminl:
2392 case Builtin::BI__builtin_fminf16:
2393 case Builtin::BI__builtin_fminf128:
2394 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
2395
2396 case Builtin::BI__builtin_fminimum_num:
2397 case Builtin::BI__builtin_fminimum_numf:
2398 case Builtin::BI__builtin_fminimum_numl:
2399 case Builtin::BI__builtin_fminimum_numf16:
2400 case Builtin::BI__builtin_fminimum_numf128:
2401 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
2402
2403 case Builtin::BI__builtin_fmax:
2404 case Builtin::BI__builtin_fmaxf:
2405 case Builtin::BI__builtin_fmaxl:
2406 case Builtin::BI__builtin_fmaxf16:
2407 case Builtin::BI__builtin_fmaxf128:
2408 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
2409
2410 case Builtin::BI__builtin_fmaximum_num:
2411 case Builtin::BI__builtin_fmaximum_numf:
2412 case Builtin::BI__builtin_fmaximum_numl:
2413 case Builtin::BI__builtin_fmaximum_numf16:
2414 case Builtin::BI__builtin_fmaximum_numf128:
2415 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
2416
2417 case Builtin::BI__builtin_isnan:
2418 return interp__builtin_isnan(S, OpPC, Frame, Call);
2419
2420 case Builtin::BI__builtin_issignaling:
2421 return interp__builtin_issignaling(S, OpPC, Frame, Call);
2422
2423 case Builtin::BI__builtin_isinf:
2424 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/CheckSign: false, Call);
2425
2426 case Builtin::BI__builtin_isinf_sign:
2427 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/CheckSign: true, Call);
2428
2429 case Builtin::BI__builtin_isfinite:
2430 return interp__builtin_isfinite(S, OpPC, Frame, Call);
2431
2432 case Builtin::BI__builtin_isnormal:
2433 return interp__builtin_isnormal(S, OpPC, Frame, Call);
2434
2435 case Builtin::BI__builtin_issubnormal:
2436 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
2437
2438 case Builtin::BI__builtin_iszero:
2439 return interp__builtin_iszero(S, OpPC, Frame, Call);
2440
2441 case Builtin::BI__builtin_signbit:
2442 case Builtin::BI__builtin_signbitf:
2443 case Builtin::BI__builtin_signbitl:
2444 return interp__builtin_signbit(S, OpPC, Frame, Call);
2445
2446 case Builtin::BI__builtin_isgreater:
2447 case Builtin::BI__builtin_isgreaterequal:
2448 case Builtin::BI__builtin_isless:
2449 case Builtin::BI__builtin_islessequal:
2450 case Builtin::BI__builtin_islessgreater:
2451 case Builtin::BI__builtin_isunordered:
2452 return interp_floating_comparison(S, OpPC, Call, ID: BuiltinID);
2453
2454 case Builtin::BI__builtin_isfpclass:
2455 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
2456
2457 case Builtin::BI__builtin_fpclassify:
2458 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
2459
2460 case Builtin::BI__builtin_fabs:
2461 case Builtin::BI__builtin_fabsf:
2462 case Builtin::BI__builtin_fabsl:
2463 case Builtin::BI__builtin_fabsf128:
2464 return interp__builtin_fabs(S, OpPC, Frame);
2465
2466 case Builtin::BI__builtin_abs:
2467 case Builtin::BI__builtin_labs:
2468 case Builtin::BI__builtin_llabs:
2469 return interp__builtin_abs(S, OpPC, Frame, Call);
2470
2471 case Builtin::BI__builtin_popcount:
2472 case Builtin::BI__builtin_popcountl:
2473 case Builtin::BI__builtin_popcountll:
2474 case Builtin::BI__builtin_popcountg:
2475 case Builtin::BI__popcnt16: // Microsoft variants of popcount
2476 case Builtin::BI__popcnt:
2477 case Builtin::BI__popcnt64:
2478 return interp__builtin_popcount(S, OpPC, Frame, Call);
2479
2480 case Builtin::BI__builtin_parity:
2481 case Builtin::BI__builtin_parityl:
2482 case Builtin::BI__builtin_parityll:
2483 return interp__builtin_parity(S, OpPC, Frame, Call);
2484
2485 case Builtin::BI__builtin_clrsb:
2486 case Builtin::BI__builtin_clrsbl:
2487 case Builtin::BI__builtin_clrsbll:
2488 return interp__builtin_clrsb(S, OpPC, Frame, Call);
2489
2490 case Builtin::BI__builtin_bitreverse8:
2491 case Builtin::BI__builtin_bitreverse16:
2492 case Builtin::BI__builtin_bitreverse32:
2493 case Builtin::BI__builtin_bitreverse64:
2494 return interp__builtin_bitreverse(S, OpPC, Frame, Call);
2495
2496 case Builtin::BI__builtin_classify_type:
2497 return interp__builtin_classify_type(S, OpPC, Frame, Call);
2498
2499 case Builtin::BI__builtin_expect:
2500 case Builtin::BI__builtin_expect_with_probability:
2501 return interp__builtin_expect(S, OpPC, Frame, Call);
2502
2503 case Builtin::BI__builtin_rotateleft8:
2504 case Builtin::BI__builtin_rotateleft16:
2505 case Builtin::BI__builtin_rotateleft32:
2506 case Builtin::BI__builtin_rotateleft64:
2507 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2508 case Builtin::BI_rotl16:
2509 case Builtin::BI_rotl:
2510 case Builtin::BI_lrotl:
2511 case Builtin::BI_rotl64:
2512 return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false);
2513
2514 case Builtin::BI__builtin_rotateright8:
2515 case Builtin::BI__builtin_rotateright16:
2516 case Builtin::BI__builtin_rotateright32:
2517 case Builtin::BI__builtin_rotateright64:
2518 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2519 case Builtin::BI_rotr16:
2520 case Builtin::BI_rotr:
2521 case Builtin::BI_lrotr:
2522 case Builtin::BI_rotr64:
2523 return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true);
2524
2525 case Builtin::BI__builtin_ffs:
2526 case Builtin::BI__builtin_ffsl:
2527 case Builtin::BI__builtin_ffsll:
2528 return interp__builtin_ffs(S, OpPC, Frame, Call);
2529
2530 case Builtin::BIaddressof:
2531 case Builtin::BI__addressof:
2532 case Builtin::BI__builtin_addressof:
2533 assert(isNoopBuiltin(BuiltinID));
2534 return interp__builtin_addressof(S, OpPC, Frame, Call);
2535
2536 case Builtin::BIas_const:
2537 case Builtin::BIforward:
2538 case Builtin::BIforward_like:
2539 case Builtin::BImove:
2540 case Builtin::BImove_if_noexcept:
2541 assert(isNoopBuiltin(BuiltinID));
2542 return interp__builtin_move(S, OpPC, Frame, Call);
2543
2544 case Builtin::BI__builtin_eh_return_data_regno:
2545 return interp__builtin_eh_return_data_regno(S, OpPC, Frame, Call);
2546
2547 case Builtin::BI__builtin_launder:
2548 assert(isNoopBuiltin(BuiltinID));
2549 return true;
2550
2551 case Builtin::BI__builtin_add_overflow:
2552 case Builtin::BI__builtin_sub_overflow:
2553 case Builtin::BI__builtin_mul_overflow:
2554 case Builtin::BI__builtin_sadd_overflow:
2555 case Builtin::BI__builtin_uadd_overflow:
2556 case Builtin::BI__builtin_uaddl_overflow:
2557 case Builtin::BI__builtin_uaddll_overflow:
2558 case Builtin::BI__builtin_usub_overflow:
2559 case Builtin::BI__builtin_usubl_overflow:
2560 case Builtin::BI__builtin_usubll_overflow:
2561 case Builtin::BI__builtin_umul_overflow:
2562 case Builtin::BI__builtin_umull_overflow:
2563 case Builtin::BI__builtin_umulll_overflow:
2564 case Builtin::BI__builtin_saddl_overflow:
2565 case Builtin::BI__builtin_saddll_overflow:
2566 case Builtin::BI__builtin_ssub_overflow:
2567 case Builtin::BI__builtin_ssubl_overflow:
2568 case Builtin::BI__builtin_ssubll_overflow:
2569 case Builtin::BI__builtin_smul_overflow:
2570 case Builtin::BI__builtin_smull_overflow:
2571 case Builtin::BI__builtin_smulll_overflow:
2572 return interp__builtin_overflowop(S, OpPC, Call, BuiltinOp: BuiltinID);
2573
2574 case Builtin::BI__builtin_addcb:
2575 case Builtin::BI__builtin_addcs:
2576 case Builtin::BI__builtin_addc:
2577 case Builtin::BI__builtin_addcl:
2578 case Builtin::BI__builtin_addcll:
2579 case Builtin::BI__builtin_subcb:
2580 case Builtin::BI__builtin_subcs:
2581 case Builtin::BI__builtin_subc:
2582 case Builtin::BI__builtin_subcl:
2583 case Builtin::BI__builtin_subcll:
2584 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinOp: BuiltinID);
2585
2586 case Builtin::BI__builtin_clz:
2587 case Builtin::BI__builtin_clzl:
2588 case Builtin::BI__builtin_clzll:
2589 case Builtin::BI__builtin_clzs:
2590 case Builtin::BI__builtin_clzg:
2591 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
2592 case Builtin::BI__lzcnt:
2593 case Builtin::BI__lzcnt64:
2594 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinOp: BuiltinID);
2595
2596 case Builtin::BI__builtin_ctz:
2597 case Builtin::BI__builtin_ctzl:
2598 case Builtin::BI__builtin_ctzll:
2599 case Builtin::BI__builtin_ctzs:
2600 case Builtin::BI__builtin_ctzg:
2601 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
2602
2603 case Builtin::BI__builtin_bswap16:
2604 case Builtin::BI__builtin_bswap32:
2605 case Builtin::BI__builtin_bswap64:
2606 return interp__builtin_bswap(S, OpPC, Frame, Call);
2607
2608 case Builtin::BI__atomic_always_lock_free:
2609 case Builtin::BI__atomic_is_lock_free:
2610 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinOp: BuiltinID);
2611
2612 case Builtin::BI__c11_atomic_is_lock_free:
2613 return interp__builtin_c11_atomic_is_lock_free(S, OpPC, Frame, Call);
2614
2615 case Builtin::BI__builtin_complex:
2616 return interp__builtin_complex(S, OpPC, Frame, Call);
2617
2618 case Builtin::BI__builtin_is_aligned:
2619 case Builtin::BI__builtin_align_up:
2620 case Builtin::BI__builtin_align_down:
2621 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinOp: BuiltinID);
2622
2623 case Builtin::BI__builtin_assume_aligned:
2624 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
2625
2626 case clang::X86::BI__builtin_ia32_bextr_u32:
2627 case clang::X86::BI__builtin_ia32_bextr_u64:
2628 case clang::X86::BI__builtin_ia32_bextri_u32:
2629 case clang::X86::BI__builtin_ia32_bextri_u64:
2630 return interp__builtin_ia32_bextr(S, OpPC, Frame, Call);
2631
2632 case clang::X86::BI__builtin_ia32_bzhi_si:
2633 case clang::X86::BI__builtin_ia32_bzhi_di:
2634 return interp__builtin_ia32_bzhi(S, OpPC, Frame, Call);
2635
2636 case clang::X86::BI__builtin_ia32_lzcnt_u16:
2637 case clang::X86::BI__builtin_ia32_lzcnt_u32:
2638 case clang::X86::BI__builtin_ia32_lzcnt_u64:
2639 return interp__builtin_ia32_lzcnt(S, OpPC, Frame, Call);
2640
2641 case clang::X86::BI__builtin_ia32_tzcnt_u16:
2642 case clang::X86::BI__builtin_ia32_tzcnt_u32:
2643 case clang::X86::BI__builtin_ia32_tzcnt_u64:
2644 return interp__builtin_ia32_tzcnt(S, OpPC, Frame, Call);
2645
2646 case clang::X86::BI__builtin_ia32_pdep_si:
2647 case clang::X86::BI__builtin_ia32_pdep_di:
2648 return interp__builtin_ia32_pdep(S, OpPC, Frame, Call);
2649
2650 case clang::X86::BI__builtin_ia32_pext_si:
2651 case clang::X86::BI__builtin_ia32_pext_di:
2652 return interp__builtin_ia32_pext(S, OpPC, Frame, Call);
2653
2654 case clang::X86::BI__builtin_ia32_addcarryx_u32:
2655 case clang::X86::BI__builtin_ia32_addcarryx_u64:
2656 case clang::X86::BI__builtin_ia32_subborrow_u32:
2657 case clang::X86::BI__builtin_ia32_subborrow_u64:
2658 return interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, Call,
2659 BuiltinOp: BuiltinID);
2660
2661 case Builtin::BI__builtin_os_log_format_buffer_size:
2662 return interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, Call);
2663
2664 case Builtin::BI__builtin_ptrauth_string_discriminator:
2665 return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
2666
2667 case Builtin::BI__noop:
2668 pushInteger(S, Val: 0, QT: Call->getType());
2669 return true;
2670
2671 case Builtin::BI__builtin_operator_new:
2672 return interp__builtin_operator_new(S, OpPC, Frame, Call);
2673
2674 case Builtin::BI__builtin_operator_delete:
2675 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
2676
2677 case Builtin::BI__arithmetic_fence:
2678 return interp__builtin_arithmetic_fence(S, OpPC, Frame, Call);
2679
2680 case Builtin::BI__builtin_reduce_add:
2681 case Builtin::BI__builtin_reduce_mul:
2682 case Builtin::BI__builtin_reduce_and:
2683 case Builtin::BI__builtin_reduce_or:
2684 case Builtin::BI__builtin_reduce_xor:
2685 case Builtin::BI__builtin_reduce_min:
2686 case Builtin::BI__builtin_reduce_max:
2687 return interp__builtin_vector_reduce(S, OpPC, Call, ID: BuiltinID);
2688
2689 case Builtin::BI__builtin_elementwise_popcount:
2690 case Builtin::BI__builtin_elementwise_bitreverse:
2691 return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call,
2692 BuiltinID);
2693
2694 case Builtin::BI__builtin_memcpy:
2695 case Builtin::BImemcpy:
2696 case Builtin::BI__builtin_wmemcpy:
2697 case Builtin::BIwmemcpy:
2698 case Builtin::BI__builtin_memmove:
2699 case Builtin::BImemmove:
2700 case Builtin::BI__builtin_wmemmove:
2701 case Builtin::BIwmemmove:
2702 return interp__builtin_memcpy(S, OpPC, Frame, Call, ID: BuiltinID);
2703
2704 case Builtin::BI__builtin_memcmp:
2705 case Builtin::BImemcmp:
2706 case Builtin::BI__builtin_bcmp:
2707 case Builtin::BIbcmp:
2708 case Builtin::BI__builtin_wmemcmp:
2709 case Builtin::BIwmemcmp:
2710 return interp__builtin_memcmp(S, OpPC, Frame, Call, ID: BuiltinID);
2711
2712 case Builtin::BImemchr:
2713 case Builtin::BI__builtin_memchr:
2714 case Builtin::BIstrchr:
2715 case Builtin::BI__builtin_strchr:
2716 case Builtin::BIwmemchr:
2717 case Builtin::BI__builtin_wmemchr:
2718 case Builtin::BIwcschr:
2719 case Builtin::BI__builtin_wcschr:
2720 case Builtin::BI__builtin_char_memchr:
2721 return interp__builtin_memchr(S, OpPC, Call, ID: BuiltinID);
2722
2723 case Builtin::BI__builtin_object_size:
2724 case Builtin::BI__builtin_dynamic_object_size:
2725 return interp__builtin_object_size(S, OpPC, Frame, Call);
2726
2727 case Builtin::BI__builtin_is_within_lifetime:
2728 return interp__builtin_is_within_lifetime(S, OpPC, Call);
2729
2730 case Builtin::BI__builtin_elementwise_add_sat:
2731 case Builtin::BI__builtin_elementwise_sub_sat:
2732 return interp__builtin_elementwise_sat(S, OpPC, Call, BuiltinID);
2733
2734 default:
2735 S.FFDiag(Loc: S.Current->getLocation(PC: OpPC),
2736 DiagId: diag::note_invalid_subexpr_in_const_expr)
2737 << S.Current->getRange(PC: OpPC);
2738
2739 return false;
2740 }
2741
2742 llvm_unreachable("Unhandled builtin ID");
2743}
2744
/// Evaluates an OffsetOfExpr (__builtin_offsetof) \p E by walking its
/// component list and accumulating the byte offset into \p IntResult.
///
/// \p ArrayIndices holds the already-evaluated array-subscript values, one
/// per OffsetOfNode::Array component, in order (they were pushed on the
/// stack as Sint64 during bytecode generation).
/// \return false on conditions the interpreter cannot handle: a missing or
/// invalid record, or a virtual base class.
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  // Index of the next unconsumed entry in ArrayIndices.
  unsigned ArrayIndex = 0;
  // Type of the (sub)object the components traversed so far designate.
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(Idx: I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      // A named member: add its bit offset within the enclosing record,
      // converted to bytes.
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(D: RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result +=
          S.getASTContext().toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: FieldIndex));
      // Continue the walk in the field's type (stripping references).
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(T: CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(T: CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      // Virtual bases have no statically-known offset; bail out.
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(D: RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(Base: cast<CXXRecordDecl>(Val: BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}
2818
2819bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
2820 const Pointer &Ptr, const APSInt &IntValue) {
2821
2822 const Record *R = Ptr.getRecord();
2823 assert(R);
2824 assert(R->getNumFields() == 1);
2825
2826 unsigned FieldOffset = R->getField(I: 0u)->Offset;
2827 const Pointer &FieldPtr = Ptr.atField(Off: FieldOffset);
2828 PrimType FieldT = *S.getContext().classify(T: FieldPtr.getType());
2829
2830 INT_TYPE_SWITCH(FieldT,
2831 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
2832 FieldPtr.initialize();
2833 return true;
2834}
2835
2836static void zeroAll(Pointer &Dest) {
2837 const Descriptor *Desc = Dest.getFieldDesc();
2838
2839 if (Desc->isPrimitive()) {
2840 TYPE_SWITCH(Desc->getPrimType(), {
2841 Dest.deref<T>().~T();
2842 new (&Dest.deref<T>()) T();
2843 });
2844 return;
2845 }
2846
2847 if (Desc->isRecord()) {
2848 const Record *R = Desc->ElemRecord;
2849 for (const Record::Field &F : R->fields()) {
2850 Pointer FieldPtr = Dest.atField(Off: F.Offset);
2851 zeroAll(Dest&: FieldPtr);
2852 }
2853 return;
2854 }
2855
2856 if (Desc->isPrimitiveArray()) {
2857 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
2858 TYPE_SWITCH(Desc->getPrimType(), {
2859 Dest.deref<T>().~T();
2860 new (&Dest.deref<T>()) T();
2861 });
2862 }
2863 return;
2864 }
2865
2866 if (Desc->isCompositeArray()) {
2867 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
2868 Pointer ElemPtr = Dest.atIndex(Idx: I).narrow();
2869 zeroAll(Dest&: ElemPtr);
2870 }
2871 return;
2872 }
2873}
2874
2875static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2876 Pointer &Dest, bool Activate);
2877static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
2878 Pointer &Dest, bool Activate = false) {
2879 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2880 const Descriptor *DestDesc = Dest.getFieldDesc();
2881
2882 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
2883 Pointer DestField = Dest.atField(Off: F.Offset);
2884 if (std::optional<PrimType> FT = S.Ctx.classify(T: F.Decl->getType())) {
2885 TYPE_SWITCH(*FT, {
2886 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
2887 if (Src.atField(F.Offset).isInitialized())
2888 DestField.initialize();
2889 if (Activate)
2890 DestField.activate();
2891 });
2892 return true;
2893 }
2894 // Composite field.
2895 return copyComposite(S, OpPC, Src: Src.atField(Off: F.Offset), Dest&: DestField, Activate);
2896 };
2897
2898 assert(SrcDesc->isRecord());
2899 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
2900 const Record *R = DestDesc->ElemRecord;
2901 for (const Record::Field &F : R->fields()) {
2902 if (R->isUnion()) {
2903 // For unions, only copy the active field. Zero all others.
2904 const Pointer &SrcField = Src.atField(Off: F.Offset);
2905 if (SrcField.isActive()) {
2906 if (!copyField(F, /*Activate=*/true))
2907 return false;
2908 } else {
2909 Pointer DestField = Dest.atField(Off: F.Offset);
2910 zeroAll(Dest&: DestField);
2911 }
2912 } else {
2913 if (!copyField(F, Activate))
2914 return false;
2915 }
2916 }
2917
2918 for (const Record::Base &B : R->bases()) {
2919 Pointer DestBase = Dest.atField(Off: B.Offset);
2920 if (!copyRecord(S, OpPC, Src: Src.atField(Off: B.Offset), Dest&: DestBase, Activate))
2921 return false;
2922 }
2923
2924 Dest.initialize();
2925 return true;
2926}
2927
2928static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2929 Pointer &Dest, bool Activate = false) {
2930 assert(Src.isLive() && Dest.isLive());
2931
2932 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2933 const Descriptor *DestDesc = Dest.getFieldDesc();
2934
2935 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
2936
2937 if (DestDesc->isPrimitiveArray()) {
2938 assert(SrcDesc->isPrimitiveArray());
2939 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2940 PrimType ET = DestDesc->getPrimType();
2941 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2942 Pointer DestElem = Dest.atIndex(Idx: I);
2943 TYPE_SWITCH(ET, {
2944 DestElem.deref<T>() = Src.atIndex(I).deref<T>();
2945 DestElem.initialize();
2946 });
2947 }
2948 return true;
2949 }
2950
2951 if (DestDesc->isCompositeArray()) {
2952 assert(SrcDesc->isCompositeArray());
2953 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2954 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2955 const Pointer &SrcElem = Src.atIndex(Idx: I).narrow();
2956 Pointer DestElem = Dest.atIndex(Idx: I).narrow();
2957 if (!copyComposite(S, OpPC, Src: SrcElem, Dest&: DestElem, Activate))
2958 return false;
2959 }
2960 return true;
2961 }
2962
2963 if (DestDesc->isRecord())
2964 return copyRecord(S, OpPC, Src, Dest, Activate);
2965 return Invalid(S, OpPC);
2966}
2967
2968bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
2969 return copyComposite(S, OpPC, Src, Dest);
2970}
2971
2972} // namespace interp
2973} // namespace clang
2974

// source code of clang/lib/AST/ByteCode/InterpBuiltin.cpp