//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <string>

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return Address(Alloca, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(AllocaInsertPt);
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
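/// For example, a type declared 'alignas(16)' may be lowered to an LLVM type
/// whose ABI alignment is smaller; use CreateMemTemp when the AST-level
/// alignment is required. A sketch of the mismatch:
/// \code
///   struct alignas(16) S { int x; };  // AST alignment is 16, but the
///                                     // lowered type's ABI alignment may
///                                     // be only that of 'int'
/// \endcode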
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
  auto *Alloca = Var.getPointer();
  assert(isa<llvm::AllocaInst>(Alloca) ||
         (isa<llvm::AddrSpaceCastInst>(Alloca) &&
          isa<llvm::AllocaInst>(
              cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand())));

  auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false,
                                    Var.getAlignment().getAsAlign());
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                    /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(
        Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
        Result.getAlignment());
  }
  return Result;
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
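///
/// For example, a member-pointer condition is lowered through the C++ ABI
/// rather than a plain compare-against-zero (a sketch):
/// \code
///   int S::*mp = &S::x;
///   if (mp) { ... }  // handled via EmitMemberPointerIsNotNull below
/// \endcode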
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
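  //
  // For example, 'const __strong id &r = makeObject();' (where 'makeObject'
  // is a hypothetical Objective-C++ call) lifetime-extends the temporary, so
  // a release must be registered for the end of the extending scope.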
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record, try to promote it
    // into a constant global under the same rules a normal constant would've
    // been promoted. This is easier on the optimizer and generally emits
    // fewer instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return Address(C, alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().startswith("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      Object = Address(llvm::ConstantExpr::getBitCast(Var,
                           ConvertTypeForMem(E->getType())
                               ->getPointerTo(Object.getAddressSpace())),
                       Object.getAlignment());

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         ConvertTypeForMem(E->getType())->getPointerTo()),
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !SanOpts.has(SanitizerKind::HWAddress) &&
          !SanOpts.has(SanitizerKind::Memory) &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (unsigned I = Adjustments.size(); I != 0; --I) {
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress(*this);
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
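/// The computation, matching the IR built below, is:
/// \code
///   a = (Low ^ High) * kMul;  a ^= (a >> 47);
///   b = (High ^ a) * kMul;    b ^= (b >> 47);
///   return b * kMul;          // kMul == 0x9ddfea08eb382d69
/// \endcode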
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) |
         SanOpts.has(SanitizerKind::Alignment) |
         SanOpts.has(SanitizerKind::ObjectSize) |
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  uint64_t AlignVal = 0;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal > 1 &&
        (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    // Make sure we're not losing information. Alignment needs to be a power of
    // 2
    assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
          EmitCheckSourceLocation(Loc),
          EmitCheckTypeDescriptor(Ty),
          CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
          llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
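///
/// For example, the trailing access below is treated as a flexible array
/// member access and is not bounds-checked:
/// \code
///   struct S { int n; char data[1]; };  // [0], [1] and [] all qualify here
///   char get(struct S *s, int i) { return s->data[i]; }
/// \endcode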
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
  // the two mechanisms.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
    // was produced by macro expansion.
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      // FIXME: Sema doesn't treat a T[1] union member as a flexible array
      // member, only a T[0] or T[] member gets that treatment.
      if (FD->getParent()->isUnion())
        return true;
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
    return IRE->getDecl()->getNextIvar() == nullptr;
  }

  return false;
}

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
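///
/// For example:
/// \code
///   int arr[10];
///   arr[i] = 0;  // IndexedType is 'int[10]'; the bound is the constant 10
/// \endcode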
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

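  // If the bounded memory is actually accessed, the index must be strictly
  // less than the bound; merely forming a one-past-the-end address
  // (Accessed == false) additionally allows Index == Bound, hence the ULT
  // vs. ULE below.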
  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
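///
/// For example, for '(char *)&x' the known alignment of 'x' itself can be
/// propagated instead of the 1-byte alignment implied by 'char *'.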
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }
        return CE->getCastKind() != CK_AddressSpaceConversion
                   ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
                   : Builder.CreateAddrSpaceCast(Addr,
                                                 ConvertType(E->getType()));
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align =
      CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
  return Address(EmitScalarExpr(E), Align);
}

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
                        E->getType());
}

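/// Return whether \p Obj is the 'this' expression, possibly wrapped in
/// parentheses, casts, or the GNU __extension__ operator. A dynamic_cast of
/// 'this' does not qualify, since its result can be null.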
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
                  LV.getAlignment(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
                             ->getCallReturnType(getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      llvm::Value *V = LV.getPointer(*this);
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
                              getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr());
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
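/// For example, 'const int' qualifies, while a const-qualified class type
/// with mutable fields or non-trivial copy/destroy behavior does not.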
1434static bool isConstantEmittableObjectType(QualType type) {
1435 assert(type.isCanonical());
1436 assert(!type->isReferenceType());
1437
1438 // Must be const-qualified but non-volatile.
1439 Qualifiers qs = type.getLocalQualifiers();
1440 if (!qs.hasConst() || qs.hasVolatile()) return false;
1441
1442 // Otherwise, all object types satisfy this except C++ classes with
1443 // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
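///
/// For illustration, a sketch of how variable types map to kinds:
/// \code
///   const float f = 1.0f; // CEK_AsValueOnly: foldable though not constexpr
///   const int &cr = n;    // CEK_AsValueOrReference
///   int &r = n;           // CEK_AsReferenceOnly
///   int x;                // CEK_None
/// \endcode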
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
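///
/// For example (sketch):
/// \code
///   void f() {
///     const int x = 42;
///     auto l = [] { return x; }; // no capture needed: 'x' is not odr-used
///   }                            // here and folds to the constant 42
/// \endcode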
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In that case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as a load from the host
  // global variable as a compile-time constant, since the host variable is
  // not accessible on the device. The DRE of the captured reference variable
  // has to be loaded from the captures instead.
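  // Sketch of the situation (hypothetical HIP-style source):
  //   int HostVar;                            // host-only global
  //   void host_fn() {
  //     int &Ref = HostVar;
  //     auto K = [=] __device__ () { return Ref; }; // must read the capture,
  //   }                                             // not HostVar itself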
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for variables that must be emitted,
  // but currently it only fires when the variable would otherwise be omitted.
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
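    // e.g. given 'struct S { static const int n = 7; } s;', the access 's.n'
    // names the static member and can be rewritten as a DRE naming 'S::n'
    // (hypothetical example).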
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

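/// Does this type use a boolean (i1) value representation even though its
/// in-memory representation may be wider? For example (sketch):
/// \code
///   bool b;                    // the obvious case
///   enum E : bool { No, Yes }; // enum whose underlying type is bool
///   _Atomic(bool) ab;          // atomic wrapper around a boolean
/// \endcode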
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

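/// Compute the valid value range [Min, End) for loads of the given type.
/// Only booleans and (given StrictEnums, in C++) enums without a fixed
/// underlying type get a range. Worked example (sketch):
/// \code
///   enum E { A = -5, B = 6 };
///   // NumNegativeBits = 4 (for -5), NumPositiveBits = 3 (for 6)
///   // NumBits = max(4, 3 + 1) = 4  =>  End = 8, Min = -8: range [-8, 8)
/// \endcode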
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
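  // After the decrement, End is the largest valid value, so inclusive
  // comparisons suffice below. For a bool load this emits roughly
  // (illustrative IR): %ok = icmp ule i8 %val, 1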
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
    // For better performance, handle vector loads differently.
    if (Ty->isVectorType()) {
      const llvm::Type *EltTy = Addr.getElementType();

      const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

      // Handle vectors of size 3 like size 4 for better performance.
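      // e.g. a 'float3' load becomes roughly (illustrative IR):
      //   %cast = bitcast <3 x float>* %addr to <4 x float>*
      //   %v4   = load <4 x float>, <4 x float>* %cast
      //   %v3   = shufflevector <4 x float> %v4, <4 x float> undef,
      //                         <3 x i32> <i32 0, i32 1, i32 2>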
      if (VTy->getNumElements() == 3) {

        // Bitcast to vec4 type.
        auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
        Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
        // Now load value.
        llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

        // Shuffle vector to get vec3.
        V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2},
                                        "extractVec");
        return EmitFromMemory(V, Ty);
      }
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
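  // (In registers a bool is i1; in memory it is typically wider, e.g. i8.
  // So we zext on the way in here, and EmitFromMemory truncs on the way out.)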
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType) if it points to an array (the memory type of MatrixType).
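// e.g. a 2x2 float matrix lives in memory as [4 x float] but is manipulated
// as <4 x float>, so this rewrites the pointee type (sketch):
//   [4 x float]*  ->  <4 x float>*   when IsVector
//   <4 x float>*  ->  [4 x float]*   otherwise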
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
                                         bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(
      cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(
      cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
  }

  return Addr;
}

// Emit a store of a matrix LValue. This may require casting the pointer to
// the memory type of the matrix (ArrayType) to a pointer to its value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
    // Handle vectors differently to get better performance.
    if (Ty->isVectorType()) {
      llvm::Type *SrcTy = Value->getType();
      auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
      // Handle vec3 special.
      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
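        // (The -1 mask lane below pads the vec3 with an undef element,
        // e.g. <3 x float> %v -> shufflevector ... <i32 0, 1, 2, undef>.)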
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}

// Emit a load of an LValue of matrix type. This may require casting the
// pointer to the memory type (ArrayType) to a pointer to the value type
// (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress(*this);
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global register variables always invoke intrinsics.
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(
        Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;