//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Interp.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"

namespace clang {
namespace interp {

static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
  unsigned O = 0;

  for (const Expr *E : C->arguments()) {
    O += align(primSize(*S.getContext().classify(E)));
  }

  return O;
}
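
// Illustrative sketch (not part of the interpreter): for a call f(1, 2.0) the
// two arguments classify as PT_Sint32 and PT_Float, so the argument area is
// align(primSize(PT_Sint32)) + align(primSize(PT_Float)) bytes. The exact
// byte counts depend on the primitive sizes and on align()'s rounding, so
// treat the numbers as host-specific.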

template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
  assert(Frame->getFunction()->getNumParams() > Index);
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  return Frame->getParam<T>(Offset);
}

PrimType getIntPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getCtx().getTargetInfo();
  unsigned IntWidth = TI.getIntWidth();

  if (IntWidth == 32)
    return PT_Sint32;
  else if (IntWidth == 16)
    return PT_Sint16;
  llvm_unreachable("Int isn't 16 or 32 bit?");
}

PrimType getLongPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getCtx().getTargetInfo();
  unsigned LongWidth = TI.getLongWidth();

  if (LongWidth == 64)
    return PT_Sint64;
  else if (LongWidth == 32)
    return PT_Sint32;
  else if (LongWidth == 16)
    return PT_Sint16;
  llvm_unreachable("long isn't 16, 32 or 64 bit?");
}

/// Peek an integer value from the stack into an APSInt.
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
  if (Offset == 0)
    Offset = align(primSize(T));

  APSInt R;
  INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());

  return R;
}
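
// Illustrative sketch: the peek offset includes the size of the value being
// read, so with the default Offset of 0 the call
//   APSInt V = peekToAPSInt(S.Stk, PT_Sint32);
// reads the topmost value at align(primSize(PT_Sint32)). Larger offsets
// address values buried deeper in the current call's argument area.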

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}
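
// Illustrative sketch: a builtin with an output parameter writes through the
// pointer with assignInteger(Ptr, PT_Sint32, Value) and leaves its direct
// return value on the stack with pushInteger(S, Value, Call->getType());
// the two helpers cover the "store to memory" and "push as result" cases.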

static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
                         std::optional<PrimType> &T) {
  if (!T)
    return RetVoid(S, OpPC, Result);

#define RET_CASE(X)                                                            \
  case X:                                                                      \
    return Ret<X>(S, OpPC, Result);
  switch (*T) {
    RET_CASE(PT_Ptr);
    RET_CASE(PT_FnPtr);
    RET_CASE(PT_Float);
    RET_CASE(PT_Bool);
    RET_CASE(PT_Sint8);
    RET_CASE(PT_Uint8);
    RET_CASE(PT_Sint16);
    RET_CASE(PT_Uint16);
    RET_CASE(PT_Sint32);
    RET_CASE(PT_Uint32);
    RET_CASE(PT_Sint64);
    RET_CASE(PT_Uint64);
  default:
    llvm_unreachable("Unsupported return type for builtin function");
  }
#undef RET_CASE
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The frame above that is potentially the one for
  // std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      Frame->Caller && S.getEvalStatus().Diag) {
    auto isStdCall = [](const FunctionDecl *F) -> bool {
      return F && F->isInStdNamespace() && F->getIdentifier() &&
             F->getIdentifier()->isStr("is_constant_evaluated");
    };
    const InterpFrame *Caller = Frame->Caller;

    if (Caller->Caller && isStdCall(Caller->getCallee())) {
      const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated";
    } else {
      const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated";
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}
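
// Illustrative sketch: in a manifestly constant-evaluated context such as
//   constexpr bool B = std::is_constant_evaluated();
// the result is always true, and the code above emits the "always true in
// constexpr" warning against the call site of std::is_constant_evaluated()
// (or of the builtin itself, when it is called directly).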

static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Pointer &A = getParam<Pointer>(Frame, 0);
  const Pointer &B = getParam<Pointer>(Frame, 1);

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  int32_t Result = 0;
  for (;; ++IndexA, ++IndexB) {
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB) {
      Result = 1;
      break;
    } else if (CA < CB) {
      Result = -1;
      break;
    }
    if (CA == 0 || CB == 0)
      break;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}
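
// Illustrative sketch: at constant-evaluation time
//   constexpr int R = __builtin_strcmp("abc", "abd");
// walks both arrays byte-by-byte, hits 'c' < 'd' at index 2 and folds R to
// -1. Out-of-range reads (e.g. a non-terminated buffer) fail the
// CheckRange() calls and reject the evaluation instead of reading past the
// end.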

static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Pointer &StrPtr = getParam<Pointer>(Frame, 0);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint8_t Val = ElemPtr.deref<uint8_t>();
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}
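
// Illustrative sketch: __builtin_strlen("ab" + 1) starts counting at the
// pointer's current index, so it folds to 1; the count stops at the first
// zero byte and never includes the terminator.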

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F,
                                bool Signaling) {
  const Pointer &Arg = getParam<Pointer>(Frame, 0);

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType());

  Floating Result;
  if (S.getCtx().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}
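
// Illustrative sketch: __builtin_nan("0x7ff") parses the string with
// StringRef::getAsInteger(0, ...) (radix auto-detected from the prefix) and
// uses the value as the NaN payload, so on a NaN-2008 target the call folds
// to a quiet NaN whose significand carries 0x7ff. A string that fails to
// parse, like __builtin_nan("bogus"), means the call is not constant-folded.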

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F) {
  const llvm::fltSemantics &TargetSemantics =
      S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F) {
  const Floating &Arg1 = getParam<Floating>(Frame, 0);
  const Floating &Arg2 = getParam<Floating>(Frame, 1);

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}

static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *F) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  // When comparing zeroes, return -0.0 if one of the zeroes is negative.
  if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
    Result = RHS;
  else if (LHS.isNan() || RHS < LHS)
    Result = RHS;
  else
    Result = LHS;

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  // When comparing zeroes, return +0.0 if one of the zeroes is positive.
  if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
    Result = RHS;
  else if (LHS.isNan() || RHS > LHS)
    Result = RHS;
  else
    Result = LHS;

  S.Stk.push<Floating>(Result);
  return true;
}
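
// Illustrative sketch of the zero-sign handling above:
//   __builtin_fmin(+0.0, -0.0) folds to -0.0, and
//   __builtin_fmax(-0.0, +0.0) folds to +0.0,
// matching fmin/fmax semantics where the signed zeroes compare equal but the
// more negative (resp. more positive) zero is preferred. A NaN operand makes
// the other operand win.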

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  bool CheckSign, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, IsInf, Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame, const Function *F,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
  const Floating &F =
      S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}
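
// Illustrative sketch: the mask argument uses llvm::FPClassTest bits (e.g.,
// assuming fcNegZero == 0x20, __builtin_isfpclass(-0.0, 0x20) folds to a
// nonzero value). The builtin simply ANDs the value's class with the
// requested mask, so combined masks like "any NaN" work with no extra logic.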

/// Five int values followed by one floating value.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.peek<Floating>();

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);
  unsigned IntSize = primSize(getIntPrimType(S));
  unsigned Offset =
      align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));

  APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
  pushInteger(S, I, Call->getType());
  return true;
}
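
// Illustrative offset computation for the code above: arguments are pushed
// left-to-right, so the float (the sixth argument) sits on top and the five
// class values lie beneath it. For Index == 0 (the first argument) the peek
// must skip the float plus all five ints:
//   align(primSize(PT_Float)) + 5 * align(IntSize);
// for Index == 4 (the last int pushed, directly under the float) it skips
// the float and one int, which is exactly what (1 + (4 - Index)) produces.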

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &Val = getParam<Floating>(Frame, 0);

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *Func,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}
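
// Illustrative sketch of the bit builtins above (32-bit int assumed):
//   __builtin_popcount(0xF0)   == 4          // four set bits
//   __builtin_parity(0x7)      == 1          // odd number of set bits
//   __builtin_clrsb(0)         == 31         // bitwidth minus significant bits
//   __builtin_bitreverse32(1u) == 0x80000000u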

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
  if (NumArgs == 3)
    Offset += align(primSize(PT_Float));

  APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call,
                                   bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  APSInt Value = peekToAPSInt(
      S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}
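
// Illustrative sketch: the rotate amount is reduced modulo the bit width, so
//   __builtin_rotateleft8(0x01, 9) == __builtin_rotateleft8(0x01, 1) == 0x02
// and an amount of 0 returns the value unchanged.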

static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = peekToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}
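
// Illustrative sketch: ffs returns the 1-based index of the least significant
// set bit, e.g. __builtin_ffs(0x18) == 4, and 0 for an all-zero input (the
// countr_zero() == bitwidth case above).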

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType PtrT =
      S.getContext().classify(Call->getArg(0)->getType()).value_or(PT_Ptr);

  if (PtrT == PT_FnPtr) {
    const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
    S.Stk.push<FunctionPointer>(Arg);
  } else if (PtrT == PT_Ptr) {
    const Pointer &Arg = S.Stk.peek<Pointer>();
    S.Stk.push<Pointer>(Arg);
  } else {
    assert(false && "Unsupported pointer type passed to __builtin_addressof()");
  }
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 const CallExpr *Call) {

  PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););

  return Func->getDecl()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = peekToAPSInt(S.Stk, ArgT);

  int Result =
      S.getCtx().getTargetInfo().getEHDataRegisterNumber(Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

/// Just takes the first Argument to the call and puts it on the stack.
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
                        const Function *Func, const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  S.Stk.push<Pointer>(Arg);
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut).
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  Pointer &ResultPtr = S.Stk.peek<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
  APSInt LHS = peekToAPSInt(S.Stk, LHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                                align(primSize(LHST)));
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getCtx().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Perform the operation in the (possibly widened) common type.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <= Result's bit
    // width here, and extOrTrunc only ever truncates.
    APSInt Temp = Result.extOrTrunc(S.getCtx().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Func->getDecl()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}
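
// Illustrative sketch: evaluating
//   int R; bool Ovf = __builtin_add_overflow(INT_MAX, 1, &R);
// in a constexpr context widens the operands, computes the sum with
// sadd_ov(), truncates back to int (detecting the wrap to INT_MIN), stores
// the wrapped value through the pointer, and pushes true as the overflow
// flag.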

/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
                                align(primSize(RHST)));
  APSInt LHS =
      peekToAPSInt(S.Stk, LHST,
                   align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                       align(primSize(CarryT)) + align(primSize(LHST)));
  APSInt CarryIn = peekToAPSInt(
      S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen, but CGBuiltin uses an OR,
  // so this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}
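
// Illustrative sketch: __builtin_addc(UINT_MAX, 0, 1, &CarryOut) adds the
// operands and the incoming carry with two uadd_ov() steps; the sum wraps to
// 0 and either step's overflow sets the carry, so CarryOut becomes 1 and the
// builtin's result is 0.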

static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}
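
// Illustrative sketch: __builtin_clz(0) is rejected (undefined for GCC-style
// builtins), __builtin_clzg(0u, 32) folds to the fallback value 32,
// __lzcnt(0) folds to the bit width, and __builtin_bswap32(0x11223344) folds
// to 0x44332211.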

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  unsigned SizeValOffset = 0;
  if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
    SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
  const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getCtx().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getCtx().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
          Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      const Pointer &Ptr = S.Stk.peek<Pointer>();
      if (Ptr.isZero())
        return returnBool(true);

      QualType PointeeType = Call->getArg(1)
                                 ->IgnoreImpCasts()
                                 ->getType()
                                 ->castAs<PointerType>()
                                 ->getPointeeType();
      // OK, we will inline operations on this object.
      if (!PointeeType->isIncompleteType() &&
          S.getCtx().getTypeAlignInChars(PointeeType) >= Size)
        return returnBool(true);
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.peek<Floating>();
  const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
  Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
                                        align(primSize(PT_Ptr)));

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}
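
// Illustrative sketch: __builtin_complex(1.0, 2.0) folds to the complex value
// 1.0 + 2.0i; the result is represented as a two-element array, with the real
// part written at index 0 and the imaginary part at index 1.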

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const Function *Func,
                                               const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  unsigned CallSize = callArgSize(S, Call);

  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getCtx().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getCtx().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment, so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
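
// Illustrative sketch of the integral path above, with Align == 8:
//   __builtin_align_up(13, 8)   == (13 + 7) & ~7 == 16
//   __builtin_align_down(13, 8) == 13 & ~7       == 8
//   __builtin_is_aligned(16, 8) == true
// The pointer path answers the same questions from the declared alignment of
// the pointee plus the constant offset, and gives up (FFDiag) when the
// run-time alignment cannot be proven either way.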

static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const Function *Func,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(S.getCtx(), Call, Layout);
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}

bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
                      const CallExpr *Call) {
  const InterpFrame *Frame = S.Current;
  APValue Dummy;

  std::optional<PrimType> ReturnT = S.getContext().classify(Call);

  switch (F->getBuiltinID()) {
  case Builtin::BI__builtin_is_constant_evaluated:
    if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_assume:
  case Builtin::BI__assume:
    break;
  case Builtin::BI__builtin_strcmp:
    if (!interp__builtin_strcmp(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_strlen:
    if (!interp__builtin_strlen(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
      return false;
    break;
  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
      return false;
    break;

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128:
    if (!interp__builtin_inf(S, OpPC, Frame, F))
      return false;
    break;
  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128:
    if (!interp__builtin_copysign(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128:
    if (!interp__builtin_fmin(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128:
    if (!interp__builtin_fmax(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_isnan:
    if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issignaling:
    if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf_sign:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
      return false;
    break;

  case Builtin::BI__builtin_isfinite:
    if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isnormal:
    if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issubnormal:
    if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_iszero:
    if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isfpclass:
    if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_fpclassify:
    if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    if (!interp__builtin_fabs(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
    if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll:
    if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll:
    if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64:
    if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_classify_type:
    if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_expect:
  case Builtin::BI__builtin_expect_with_probability:
    if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
      return false;
    break;

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
      return false;
    break;

  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll:
    if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    if (!interp__builtin_move(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_eh_return_data_regno:
    if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_launder:
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    if (!noopPointer(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll:
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clzg:
  case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64:
    if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll:
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctzg:
    if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
    if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
  case Builtin::BI__c11_atomic_is_lock_free:
    if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_complex:
    if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
      return false;
    break;

  default:
    S.FFDiag(S.Current->getLocation(OpPC),
             diag::note_invalid_subexpr_in_const_expr)
        << S.Current->getRange(OpPC);

    return false;
  }

  return retPrimValue(S, OpPC, Dummy, ReturnT);
}

bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       llvm::ArrayRef<int64_t> ArrayIndices,
                       int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result += S.getCtx().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getCtx().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getCtx().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}
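
// Illustrative sketch: for
//   struct S { int A; int B[4]; };
//   constexpr auto Off = __builtin_offsetof(struct S, B[2]);
// the loop adds the field offset of B (4 bytes, assuming 4-byte int and no
// padding) plus 2 * sizeof(int) from the Array node, folding Off to 12.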

bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
                                const Pointer &Ptr, const APSInt &IntValue) {

  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}

bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.atIndex(I).deref<T>();
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isRecord()) {
    assert(SrcDesc->isRecord());
    assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
    const Record *R = DestDesc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer DestField = Dest.atField(F.Offset);
      if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
        TYPE_SWITCH(*FT, {
          DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
          DestField.initialize();
        });
      } else {
        return Invalid(S, OpPC);
      }
    }
    return true;
  }

  // FIXME: Composite types.

  return Invalid(S, OpPC);
}

} // namespace interp
} // namespace clang
