//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//
  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcValueKind is EVK_RValue if the source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      CGF.EmitAggregateStore(Result, Dest.getAddress(),
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
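
// For illustration (hypothetical type, not from the original source): under
// -fobjc-gc, a plain C struct such as
//
//   struct GCPair { __strong id first, second; };  // hasObjectMember()
//
// requires the GC-aware memmove emitted in EmitCopy below, since a plain
// memcpy would bypass the collector's write barriers.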

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    uint64_t Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
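
// An illustrative sketch of the aliasing case handled above (hypothetical
// source, assuming a trivially copyable struct S):
//
//   struct S makeS(void);
//   void f(struct S *p) { *p = makeS(); }
//
// Here the destination '*p' is potentially aliased, so UseTemp is true: the
// call returns into a fresh "tmp" alloca bracketed by lifetime markers, and
// the result is then copied into '*p' by EmitFinalDestCopy.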

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}
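
// For example (illustrative, assuming the common two-field layouts of
// std::initializer_list):
//
//   std::initializer_list<int> il = {1, 2, 3};
//
// emits a backing array of three ints, stores a pointer to its first element
// into the first field, and stores either a one-past-the-end pointer or the
// length 3 into the second field, whichever the library's definition uses.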

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
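
// For example (illustrative): in 'int a[8] = {1, 2};' the filler for
// elements [2..7] is an ImplicitValueInitExpr, which is trivial, so the tail
// can be zero-filled (or skipped entirely if the memory is already zeroed).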

/// Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin = Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices,
                                                 "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        begin, llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
        "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
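
// A rough sketch of the blocks produced above for 'int a[4] = {1, 2};' when
// the destination is not known to be zeroed (illustrative pseudo-IR; block
// and value names follow the code above):
//
//   %begin = getelementptr %a, 0, 0        ; arrayinit.begin
//   store i32 1, %begin
//   %e1 = getelementptr %begin, 1          ; arrayinit.element
//   store i32 2, %e1
//   %start = getelementptr %e1, 1          ; arrayinit.start
//   %end = getelementptr %begin, 4         ; arrayinit.end
// arrayinit.body:
//   %cur = phi [ %start, %entry ], [ %next, %arrayinit.body ]
//   store i32 0, %cur                      ; trivial filler
//   %next = getelementptr %cur, 1          ; arrayinit.next
//   %done = icmp eq %next, %end
//   br %done, label %arrayinit.end, label %arrayinit.body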

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}
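
// Illustrative peephole target (hypothetical C source): copying one _Atomic
// struct into another, e.g.
//
//   _Atomic(struct S) a = b;   // b is also _Atomic(struct S)
//
// can produce a NonAtomicToAtomic cast wrapped directly around an
// AtomicToNonAtomic cast; VisitCastExpr below uses findPeephole to cancel
// the pair and emit the operand directly, avoiding a padded-atomic round trip.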

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }
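
  // Illustrative use of the extension handled above (hypothetical source):
  //
  //   union U { int i; float f; };
  //   union U u = (union U)42;   // GCC cast-to-union: initializes 'i'
  //
  // The operand is emitted directly into the destination, reinterpreted as
  // the operand's type.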

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }
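
  // For example (illustrative), __builtin_bit_cast with an aggregate result
  // takes this path:
  //
  //   struct Bits { unsigned lo, hi; };
  //   double d = 1.0;
  //   Bits b = __builtin_bit_cast(Bits, d);   // lowered to an 8-byte memcpy
  //
  // Scalar-to-scalar bit_casts are handled by the scalar expression emitter
  // instead.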

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                      atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
        (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), /*GetLast=*/true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
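
// A sketch of what the selects above compute for a non-partial ordering
// (illustrative; LT/EQ/GT stand for the constants supplied by
// ComparisonCategories::getInfoForType):
//
//   // result of 'a <=> b' for integral a and b:
//   int v = (a == b) ? EQ : ((a < b) ? LT : GT);   // "sel.eq" over "sel.lt"
//
// For partial orderings, an extra select substitutes the 'unordered' value
// when none of the three tests succeed.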

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
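
// Why this matters (illustrative Objective-C/blocks example, hypothetical
// names):
//
//   __block struct S s;
//   s = makeSAndCopyBlock();   // RHS may cause the enclosing block to be
//                              // copied, moving 's' to the heap
//
// VisitBinAssign below evaluates such an RHS first, so the final store
// targets the variable's post-copy location.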

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might still have a volatile member,
  // in which case the copy must be treated as volatile.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
1291 | |
1292 | void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
1293 | // Ensure that we have a slot, but if we already do, remember |
1294 | // whether it was externally destructed. |
1295 | bool wasExternallyDestructed = Dest.isExternallyDestructed(); |
1296 | EnsureDest(E->getType()); |
1297 | |
1298 | // We're going to push a destructor if there isn't already one. |
1299 | Dest.setExternallyDestructed(); |
1300 | |
1301 | Visit(E->getSubExpr()); |
1302 | |
1303 | // Push that destructor we promised. |
1304 | if (!wasExternallyDestructed) |
1305 | CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); |
1306 | } |
1307 | |
1308 | void |
1309 | AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { |
1310 | AggValueSlot Slot = EnsureSlot(E->getType()); |
1311 | CGF.EmitCXXConstructExpr(E, Slot); |
1312 | } |
1313 | |
1314 | void AggExprEmitter::VisitCXXInheritedCtorInitExpr( |
1315 | const CXXInheritedCtorInitExpr *E) { |
1316 | AggValueSlot Slot = EnsureSlot(E->getType()); |
1317 | CGF.EmitInheritedCXXConstructorCall( |
1318 | E->getConstructor(), E->constructsVBase(), Slot.getAddress(), |
1319 | E->inheritedFromVBase(), E); |
1320 | } |
1321 | |
1322 | void |
1323 | AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { |
1324 | AggValueSlot Slot = EnsureSlot(E->getType()); |
1325 | LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType()); |
1326 | |
1327 | // We'll need to enter cleanup scopes in case any of the element |
1328 | // initializers throws an exception. |
1329 | SmallVector<EHScopeStack::stable_iterator, 16> Cleanups; |
1330 | llvm::Instruction *CleanupDominator = nullptr; |
1331 | |
1332 | CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); |
1333 | for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), |
1334 | e = E->capture_init_end(); |
1335 | i != e; ++i, ++CurField) { |
1336 | // Emit initialization |
1337 | LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField); |
1338 | if (CurField->hasCapturedVLAType()) { |
1339 | CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV); |
1340 | continue; |
1341 | } |
1342 | |
1343 | EmitInitializationToLValue(*i, LV); |
1344 | |
1345 | // Push a destructor if necessary. |
1346 | if (QualType::DestructionKind DtorKind = |
1347 | CurField->getType().isDestructedType()) { |
1348 | assert(LV.isSimple()); |
1349 | if (CGF.needsEHCleanup(DtorKind)) { |
1350 | if (!CleanupDominator) |
1351 | CleanupDominator = CGF.Builder.CreateAlignedLoad( |
1352 | CGF.Int8Ty, |
1353 | llvm::Constant::getNullValue(CGF.Int8PtrTy), |
1354 | CharUnits::One()); // placeholder |
1355 | |
1356 | CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(), |
1357 | CGF.getDestroyer(DtorKind), false); |
1358 | Cleanups.push_back(CGF.EHStack.stable_begin()); |
1359 | } |
1360 | } |
1361 | } |
1362 | |
1363 | // Deactivate all the partial cleanups in reverse order, which |
1364 | // generally means popping them. |
1365 | for (unsigned i = Cleanups.size(); i != 0; --i) |
1366 | CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator); |
1367 | |
1368 | // Destroy the placeholder if we made one. |
1369 | if (CleanupDominator) |
1370 | CleanupDominator->eraseFromParent(); |
1371 | } |
1372 | |
1373 | void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { |
1374 | CodeGenFunction::RunCleanupsScope cleanups(CGF); |
1375 | Visit(E->getSubExpr()); |
1376 | } |
1377 | |
1378 | void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { |
1379 | QualType T = E->getType(); |
1380 | AggValueSlot Slot = EnsureSlot(T); |
1381 | EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T)); |
1382 | } |
1383 | |
1384 | void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { |
1385 | QualType T = E->getType(); |
1386 | AggValueSlot Slot = EnsureSlot(T); |
1387 | EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T)); |
1388 | } |
1389 | |
1390 | /// Determine whether the given cast kind is known to always convert values |
1391 | /// with all zero bits in their value representation to values with all zero |
1392 | /// bits in their value representation. |
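/// For example, integral and floating casts qualify: (long)0 and (float)0
/// are still all zero bits.  A null-to-pointer cast does not, since a null
/// pointer need not have an all-zero-bits representation in every address
/// space.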
1393 | static bool castPreservesZero(const CastExpr *CE) { |
1394 | switch (CE->getCastKind()) { |
1395 | // No-ops. |
1396 | case CK_NoOp: |
1397 | case CK_UserDefinedConversion: |
1398 | case CK_ConstructorConversion: |
1399 | case CK_BitCast: |
1400 | case CK_ToUnion: |
1401 | case CK_ToVoid: |
1402 | // Conversions between (possibly-complex) integral, (possibly-complex) |
1403 | // floating-point, and bool. |
1404 | case CK_BooleanToSignedIntegral: |
1405 | case CK_FloatingCast: |
1406 | case CK_FloatingComplexCast: |
1407 | case CK_FloatingComplexToBoolean: |
1408 | case CK_FloatingComplexToIntegralComplex: |
1409 | case CK_FloatingComplexToReal: |
1410 | case CK_FloatingRealToComplex: |
1411 | case CK_FloatingToBoolean: |
1412 | case CK_FloatingToIntegral: |
1413 | case CK_IntegralCast: |
1414 | case CK_IntegralComplexCast: |
1415 | case CK_IntegralComplexToBoolean: |
1416 | case CK_IntegralComplexToFloatingComplex: |
1417 | case CK_IntegralComplexToReal: |
1418 | case CK_IntegralRealToComplex: |
1419 | case CK_IntegralToBoolean: |
1420 | case CK_IntegralToFloating: |
1421 | // Reinterpreting integers as pointers and vice versa. |
1422 | case CK_IntegralToPointer: |
1423 | case CK_PointerToIntegral: |
1424 | // Language extensions. |
1425 | case CK_VectorSplat: |
1426 | case CK_MatrixCast: |
1427 | case CK_NonAtomicToAtomic: |
1428 | case CK_AtomicToNonAtomic: |
1429 | return true; |
1430 | |
1431 | case CK_BaseToDerivedMemberPointer: |
1432 | case CK_DerivedToBaseMemberPointer: |
1433 | case CK_MemberPointerToBoolean: |
1434 | case CK_NullToMemberPointer: |
1435 | case CK_ReinterpretMemberPointer: |
1436 | // FIXME: ABI-dependent. |
1437 | return false; |
1438 | |
1439 | case CK_AnyPointerToBlockPointerCast: |
1440 | case CK_BlockPointerToObjCPointerCast: |
1441 | case CK_CPointerToObjCPointerCast: |
1442 | case CK_ObjCObjectLValueCast: |
1443 | case CK_IntToOCLSampler: |
1444 | case CK_ZeroToOCLOpaqueType: |
1445 | // FIXME: Check these. |
1446 | return false; |
1447 | |
1448 | case CK_FixedPointCast: |
1449 | case CK_FixedPointToBoolean: |
1450 | case CK_FixedPointToFloating: |
1451 | case CK_FixedPointToIntegral: |
1452 | case CK_FloatingToFixedPoint: |
1453 | case CK_IntegralToFixedPoint: |
1454 | // FIXME: Do all fixed-point types represent zero as all 0 bits? |
1455 | return false; |
1456 | |
1457 | case CK_AddressSpaceConversion: |
1458 | case CK_BaseToDerived: |
1459 | case CK_DerivedToBase: |
1460 | case CK_Dynamic: |
1461 | case CK_NullToPointer: |
1462 | case CK_PointerToBoolean: |
1463 | // FIXME: Preserves zeroes only if zero pointers and null pointers have the |
1464 | // same representation in all involved address spaces. |
1465 | return false; |
1466 | |
1467 | case CK_ARCConsumeObject: |
1468 | case CK_ARCExtendBlockObject: |
1469 | case CK_ARCProduceObject: |
1470 | case CK_ARCReclaimReturnedObject: |
1471 | case CK_CopyAndAutoreleaseBlockObject: |
1472 | case CK_ArrayToPointerDecay: |
1473 | case CK_FunctionToPointerDecay: |
1474 | case CK_BuiltinFnToFnPtr: |
1475 | case CK_Dependent: |
1476 | case CK_LValueBitCast: |
1477 | case CK_LValueToRValue: |
1478 | case CK_LValueToRValueBitCast: |
1479 | case CK_UncheckedDerivedToBase: |
1480 | return false; |
1481 | } |
1482 | llvm_unreachable("Unhandled clang::CastKind enum" ); |
1483 | } |
1484 | |
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This is conservative: it only handles the
/// simple cases and returns false whenever it is uncertain.
1488 | static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) { |
1489 | E = E->IgnoreParens(); |
1490 | while (auto *CE = dyn_cast<CastExpr>(E)) { |
1491 | if (!castPreservesZero(CE)) |
1492 | break; |
1493 | E = CE->getSubExpr()->IgnoreParens(); |
1494 | } |
1495 | |
1496 | // 0 |
1497 | if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) |
1498 | return IL->getValue() == 0; |
1499 | // +0.0 |
1500 | if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E)) |
1501 | return FL->getValue().isPosZero(); |
1502 | // int() |
1503 | if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) && |
1504 | CGF.getTypes().isZeroInitializable(E->getType())) |
1505 | return true; |
1506 | // (int*)0 - Null pointer expressions. |
1507 | if (const CastExpr *ICE = dyn_cast<CastExpr>(E)) |
1508 | return ICE->getCastKind() == CK_NullToPointer && |
1509 | CGF.getTypes().isPointerZeroInitializable(E->getType()) && |
1510 | !E->HasSideEffects(CGF.getContext()); |
1511 | // '\0' |
1512 | if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) |
1513 | return CL->getValue() == 0; |
1514 | |
1515 | // Otherwise, hard case: conservatively return false. |
1516 | return false; |
1517 | } |
1518 | |
1519 | |
1520 | void |
1521 | AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { |
1522 | QualType type = LV.getType(); |
1523 | // FIXME: Ignore result? |
1524 | // FIXME: Are initializers affected by volatile? |
1525 | if (Dest.isZeroed() && isSimpleZero(E, CGF)) { |
    // Storing "i32 0" to a zeroed memory location is a no-op.
1527 | return; |
1528 | } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) { |
1529 | return EmitNullInitializationToLValue(LV); |
1530 | } else if (isa<NoInitExpr>(E)) { |
1531 | // Do nothing. |
1532 | return; |
1533 | } else if (type->isReferenceType()) { |
1534 | RValue RV = CGF.EmitReferenceBindingToExpr(E); |
1535 | return CGF.EmitStoreThroughLValue(RV, LV); |
1536 | } |
1537 | |
1538 | switch (CGF.getEvaluationKind(type)) { |
1539 | case TEK_Complex: |
1540 | CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true); |
1541 | return; |
1542 | case TEK_Aggregate: |
1543 | CGF.EmitAggExpr( |
1544 | E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed, |
1545 | AggValueSlot::DoesNotNeedGCBarriers, |
1546 | AggValueSlot::IsNotAliased, |
1547 | AggValueSlot::MayOverlap, Dest.isZeroed())); |
1548 | return; |
1549 | case TEK_Scalar: |
1550 | if (LV.isSimple()) { |
1551 | CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false); |
1552 | } else { |
1553 | CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV); |
1554 | } |
1555 | return; |
1556 | } |
1557 | llvm_unreachable("bad evaluation kind" ); |
1558 | } |
1559 | |
1560 | void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) { |
1561 | QualType type = lv.getType(); |
1562 | |
1563 | // If the destination slot is already zeroed out before the aggregate is |
1564 | // copied into it, we don't have to emit any zeros here. |
1565 | if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type)) |
1566 | return; |
1567 | |
1568 | if (CGF.hasScalarEvaluationKind(type)) { |
1569 | // For non-aggregates, we can store the appropriate null constant. |
1570 | llvm::Value *null = CGF.CGM.EmitNullConstant(type); |
1571 | // Note that the following is not equivalent to |
1572 | // EmitStoreThroughBitfieldLValue for ARC types. |
1573 | if (lv.isBitField()) { |
1574 | CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv); |
1575 | } else { |
1576 | assert(lv.isSimple()); |
1577 | CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true); |
1578 | } |
1579 | } else { |
1580 | // There's a potential optimization opportunity in combining |
1581 | // memsets; that would be easy for arrays, but relatively |
1582 | // difficult for structures with the current code. |
1583 | CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType()); |
1584 | } |
1585 | } |
1586 | |
1587 | void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { |
1588 | #if 0 |
1589 | // FIXME: Assess perf here? Figure out what cases are worth optimizing here |
1590 | // (Length of globals? Chunks of zeroed-out space?). |
1591 | // |
1592 | // If we can, prefer a copy from a global; this is a lot less code for long |
1593 | // globals, and it's easier for the current optimizers to analyze. |
  if (llvm::Constant *C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
1598 | EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType())); |
1599 | return; |
1600 | } |
1601 | #endif |
1602 | if (E->hadArrayRangeDesignator()) |
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
1604 | |
1605 | if (E->isTransparent()) |
1606 | return Visit(E->getInit(0)); |
1607 | |
1608 | AggValueSlot Dest = EnsureSlot(E->getType()); |
1609 | |
1610 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1611 | |
1612 | // Handle initialization of an array. |
1613 | if (E->getType()->isArrayType()) { |
1614 | auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType()); |
1615 | EmitArrayInit(Dest.getAddress(), AType, E->getType(), E); |
1616 | return; |
1617 | } |
1618 | |
  assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1620 | |
  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
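  //
  // For example, for 'struct S { int a; unsigned b : 3; }; S s = { 1, 2 };'
  // we emit a plain store for 'a' and, roughly, a read-modify-write sequence
  // for the bit-field 'b'.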
1625 | unsigned NumInitElements = E->getNumInits(); |
1626 | RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl(); |
1627 | |
1628 | // We'll need to enter cleanup scopes in case any of the element |
1629 | // initializers throws an exception. |
1630 | SmallVector<EHScopeStack::stable_iterator, 16> cleanups; |
1631 | llvm::Instruction *cleanupDominator = nullptr; |
1632 | auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) { |
1633 | cleanups.push_back(cleanup); |
1634 | if (!cleanupDominator) // create placeholder once needed |
1635 | cleanupDominator = CGF.Builder.CreateAlignedLoad( |
1636 | CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy), |
1637 | CharUnits::One()); |
1638 | }; |
1639 | |
1640 | unsigned curInitIndex = 0; |
1641 | |
1642 | // Emit initialization of base classes. |
1643 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) { |
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
1648 | auto *BaseRD = Base.getType()->getAsCXXRecordDecl(); |
1649 | Address V = CGF.GetAddressOfDirectBaseInCompleteClass( |
1650 | Dest.getAddress(), CXXRD, BaseRD, |
1651 | /*isBaseVirtual*/ false); |
1652 | AggValueSlot AggSlot = AggValueSlot::forAddr( |
1653 | V, Qualifiers(), |
1654 | AggValueSlot::IsDestructed, |
1655 | AggValueSlot::DoesNotNeedGCBarriers, |
1656 | AggValueSlot::IsNotAliased, |
1657 | CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual())); |
1658 | CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot); |
1659 | |
1660 | if (QualType::DestructionKind dtorKind = |
1661 | Base.getType().isDestructedType()) { |
1662 | CGF.pushDestroy(dtorKind, V, Base.getType()); |
1663 | addCleanup(CGF.EHStack.stable_begin()); |
1664 | } |
1665 | } |
1666 | } |
1667 | |
1668 | // Prepare a 'this' for CXXDefaultInitExprs. |
1669 | CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress()); |
1670 | |
1671 | if (record->isUnion()) { |
1672 | // Only initialize one field of a union. The field itself is |
1673 | // specified by the initializer list. |
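    //
    // For example, given 'union U { int i; float f; }; U u = { .f = 1.0f };'
    // only the 'f' member is initialized here.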
1674 | if (!E->getInitializedFieldInUnion()) { |
1675 | // Empty union; we have nothing to do. |
1676 | |
1677 | #ifndef NDEBUG |
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1682 | #endif |
1683 | return; |
1684 | } |
1685 | |
1686 | // FIXME: volatility |
1687 | FieldDecl *Field = E->getInitializedFieldInUnion(); |
1688 | |
1689 | LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field); |
1690 | if (NumInitElements) { |
1691 | // Store the initializer into the field |
1692 | EmitInitializationToLValue(E->getInit(0), FieldLoc); |
1693 | } else { |
1694 | // Default-initialize to null. |
1695 | EmitNullInitializationToLValue(FieldLoc); |
1696 | } |
1697 | |
1698 | return; |
1699 | } |
1700 | |
1701 | // Here we iterate over the fields; this makes it simpler to both |
1702 | // default-initialize fields and skip over unnamed fields. |
1703 | for (const auto *field : record->fields()) { |
1704 | // We're done once we hit the flexible array member. |
1705 | if (field->getType()->isIncompleteArrayType()) |
1706 | break; |
1707 | |
1708 | // Always skip anonymous bitfields. |
1709 | if (field->isUnnamedBitfield()) |
1710 | continue; |
1711 | |
1712 | // We're done if we reach the end of the explicit initializers, we |
1713 | // have a zeroed object, and the rest of the fields are |
1714 | // zero-initializable. |
1715 | if (curInitIndex == NumInitElements && Dest.isZeroed() && |
1716 | CGF.getTypes().isZeroInitializable(E->getType())) |
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
1722 | LV.setNonGC(true); |
1723 | |
1724 | if (curInitIndex < NumInitElements) { |
1725 | // Store the initializer into the field. |
1726 | EmitInitializationToLValue(E->getInit(curInitIndex++), LV); |
1727 | } else { |
      // We're out of initializers; default-initialize to null.
1729 | EmitNullInitializationToLValue(LV); |
1730 | } |
1731 | |
1732 | // Push a destructor if necessary. |
1733 | // FIXME: if we have an array of structures, all explicitly |
1734 | // initialized, we can end up pushing a linear number of cleanups. |
1735 | bool pushedCleanup = false; |
1736 | if (QualType::DestructionKind dtorKind |
1737 | = field->getType().isDestructedType()) { |
1738 | assert(LV.isSimple()); |
1739 | if (CGF.needsEHCleanup(dtorKind)) { |
1740 | CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), |
1741 | CGF.getDestroyer(dtorKind), false); |
1742 | addCleanup(CGF.EHStack.stable_begin()); |
1743 | pushedCleanup = true; |
1744 | } |
1745 | } |
1746 | |
1747 | // If the GEP didn't get used because of a dead zero init or something |
1748 | // else, clean it up for -O0 builds and general tidiness. |
1749 | if (!pushedCleanup && LV.isSimple()) |
1750 | if (llvm::GetElementPtrInst *GEP = |
1751 | dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF))) |
1752 | if (GEP->use_empty()) |
1753 | GEP->eraseFromParent(); |
1754 | } |
1755 | |
1756 | // Deactivate all the partial cleanups in reverse order, which |
1757 | // generally means popping them. |
  assert((cleanupDominator || cleanups.empty()) &&
         "Missing cleanupDominator before deactivating cleanup blocks");
1760 | for (unsigned i = cleanups.size(); i != 0; --i) |
1761 | CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator); |
1762 | |
1763 | // Destroy the placeholder if we made one. |
1764 | if (cleanupDominator) |
1765 | cleanupDominator->eraseFromParent(); |
1766 | } |
1767 | |
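/// Emit an ArrayInitLoopExpr, which initializes an array one element at a
/// time inside an emitted loop.  These arise, for instance, when a lambda
/// captures an array by value: roughly, 'int a[8]; [a] {};' copies each a[i]
/// into the closure's array field.  For nested ArrayInitLoopExprs
/// (multidimensional arrays), outerBegin points at the outermost array so a
/// single partial-destruction cleanup can cover every element emitted so far.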
1768 | void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
1769 | llvm::Value *outerBegin) { |
1770 | // Emit the common subexpression. |
1771 | CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr()); |
1772 | |
1773 | Address destPtr = EnsureSlot(E->getType()).getAddress(); |
1774 | uint64_t numElements = E->getArraySize().getZExtValue(); |
1775 | |
1776 | if (!numElements) |
1777 | return; |
1778 | |
1779 | // destPtr is an array*. Construct an elementType* by drilling down a level. |
1780 | llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); |
1781 | llvm::Value *indices[] = {zero, zero}; |
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
                                                 "arrayinit.begin");
1784 | |
1785 | // Prepare to special-case multidimensional array initialization: we avoid |
1786 | // emitting multiple destructor loops in that case. |
1787 | if (!outerBegin) |
1788 | outerBegin = begin; |
1789 | ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr()); |
1790 | |
1791 | QualType elementType = |
1792 | CGF.getContext().getAsArrayType(E->getType())->getElementType(); |
1793 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); |
1794 | CharUnits elementAlign = |
1795 | destPtr.getAlignment().alignmentOfArrayElement(elementSize); |
1796 | |
1797 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1799 | |
1800 | // Jump into the body. |
1801 | CGF.EmitBlock(bodyBB); |
1802 | llvm::PHINode *index = |
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1804 | index->addIncoming(zero, entryBB); |
1805 | llvm::Value *element = Builder.CreateInBoundsGEP(begin, index); |
1806 | |
1807 | // Prepare for a cleanup. |
1808 | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
1809 | EHScopeStack::stable_iterator cleanup; |
1810 | if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) { |
1811 | if (outerBegin->getType() != element->getType()) |
1812 | outerBegin = Builder.CreateBitCast(outerBegin, element->getType()); |
1813 | CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType, |
1814 | elementAlign, |
1815 | CGF.getDestroyer(dtorKind)); |
1816 | cleanup = CGF.EHStack.stable_begin(); |
1817 | } else { |
1818 | dtorKind = QualType::DK_none; |
1819 | } |
1820 | |
1821 | // Emit the actual filler expression. |
1822 | { |
1823 | // Temporaries created in an array initialization loop are destroyed |
1824 | // at the end of each iteration. |
1825 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
1826 | CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index); |
1827 | LValue elementLV = |
1828 | CGF.MakeAddrLValue(Address(element, elementAlign), elementType); |
1829 | |
1830 | if (InnerLoop) { |
1831 | // If the subexpression is an ArrayInitLoopExpr, share its cleanup. |
1832 | auto elementSlot = AggValueSlot::forLValue( |
1833 | elementLV, CGF, AggValueSlot::IsDestructed, |
1834 | AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, |
1835 | AggValueSlot::DoesNotOverlap); |
1836 | AggExprEmitter(CGF, elementSlot, false) |
1837 | .VisitArrayInitLoopExpr(InnerLoop, outerBegin); |
1838 | } else |
1839 | EmitInitializationToLValue(E->getSubExpr(), elementLV); |
1840 | } |
1841 | |
1842 | // Move on to the next element. |
1843 | llvm::Value *nextIndex = Builder.CreateNUWAdd( |
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1845 | index->addIncoming(nextIndex, Builder.GetInsertBlock()); |
1846 | |
1847 | // Leave the loop if we're done. |
1848 | llvm::Value *done = Builder.CreateICmpEQ( |
1849 | nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements), |
1850 | "arrayinit.done" ); |
1851 | llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end" ); |
1852 | Builder.CreateCondBr(done, endBB, bodyBB); |
1853 | |
1854 | CGF.EmitBlock(endBB); |
1855 | |
1856 | // Leave the partial-array cleanup if we entered one. |
1857 | if (dtorKind) |
1858 | CGF.DeactivateCleanupBlock(cleanup, index); |
1859 | } |
1860 | |
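/// Emit a DesignatedInitUpdateExpr by first emitting the base initializer
/// into the destination and then running the updater init list over it, so
/// later designators overwrite individual members.  This is roughly what
/// happens for 'struct S s = { .t = f(), .t.x = 1 };', where the result of
/// f() is copied in and then 'x' is overwritten.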
void
AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1862 | AggValueSlot Dest = EnsureSlot(E->getType()); |
1863 | |
1864 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1865 | EmitInitializationToLValue(E->getBase(), DestLV); |
1866 | VisitInitListExpr(E->getUpdater()); |
1867 | } |
1868 | |
1869 | //===----------------------------------------------------------------------===// |
1870 | // Entry Points into this File |
1871 | //===----------------------------------------------------------------------===// |
1872 | |
1873 | /// GetNumNonZeroBytesInInit - Get an approximate count of the number of |
1874 | /// non-zero bytes that will be stored when outputting the initializer for the |
1875 | /// specified initializer expression. |
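/// For example, for 'struct { int a, b, c, d; } x = { 17 };' this returns
/// roughly 4 bytes: only 'a' needs a non-zero store, while 'b', 'c', and 'd'
/// are zero-initialized.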
1876 | static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) { |
1877 | if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) |
1878 | E = MTE->getSubExpr(); |
1879 | E = E->IgnoreParenNoopCasts(CGF.getContext()); |
1880 | |
1881 | // 0 and 0.0 won't require any non-zero stores! |
1882 | if (isSimpleZero(E, CGF)) return CharUnits::Zero(); |
1883 | |
  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
1886 | const InitListExpr *ILE = dyn_cast<InitListExpr>(E); |
1887 | while (ILE && ILE->isTransparent()) |
1888 | ILE = dyn_cast<InitListExpr>(ILE->getInit(0)); |
1889 | if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType())) |
1890 | return CGF.getContext().getTypeSizeInChars(E->getType()); |
1891 | |
  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referent.  InitListExprs for unions and arrays can't have references.
1895 | if (const RecordType *RT = E->getType()->getAs<RecordType>()) { |
1896 | if (!RT->isUnionType()) { |
1897 | RecordDecl *SD = RT->getDecl(); |
1898 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
1899 | |
1900 | unsigned ILEElement = 0; |
1901 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD)) |
1902 | while (ILEElement != CXXRD->getNumBases()) |
1903 | NumNonZeroBytes += |
1904 | GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF); |
1905 | for (const auto *Field : SD->fields()) { |
1906 | // We're done once we hit the flexible array member or run out of |
1907 | // InitListExpr elements. |
1908 | if (Field->getType()->isIncompleteArrayType() || |
1909 | ILEElement == ILE->getNumInits()) |
1910 | break; |
1911 | if (Field->isUnnamedBitfield()) |
1912 | continue; |
1913 | |
1914 | const Expr *E = ILE->getInit(ILEElement++); |
1915 | |
1916 | // Reference values are always non-null and have the width of a pointer. |
1917 | if (Field->getType()->isReferenceType()) |
1918 | NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits( |
1919 | CGF.getTarget().getPointerWidth(0)); |
1920 | else |
1921 | NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF); |
1922 | } |
1923 | |
1924 | return NumNonZeroBytes; |
1925 | } |
1926 | } |
1927 | |
1928 | // FIXME: This overestimates the number of non-zero bytes for bit-fields. |
1929 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
1930 | for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) |
1931 | NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF); |
1932 | return NumNonZeroBytes; |
1933 | } |
1934 | |
1935 | /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of |
1936 | /// zeros in it, emit a memset and avoid storing the individual zeros. |
1937 | /// |
1938 | static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, |
1939 | CodeGenFunction &CGF) { |
1940 | // If the slot is already known to be zeroed, nothing to do. Don't mess with |
1941 | // volatile stores. |
1942 | if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid()) |
1943 | return; |
1944 | |
  // C++ objects with a user-declared constructor don't need zeroing.
1946 | if (CGF.getLangOpts().CPlusPlus) |
1947 | if (const RecordType *RT = CGF.getContext() |
1948 | .getBaseElementType(E->getType())->getAs<RecordType>()) { |
1949 | const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); |
1950 | if (RD->hasUserDeclaredConstructor()) |
1951 | return; |
1952 | } |
1953 | |
  // If the type is 16 bytes or smaller, prefer individual stores over memset.
1955 | CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType()); |
1956 | if (Size <= CharUnits::fromQuantity(16)) |
1957 | return; |
1958 | |
  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes * 4 > Size)
1963 | return; |
1964 | |
1965 | // Okay, it seems like a good idea to use an initial memset, emit the call. |
1966 | llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity()); |
1967 | |
1968 | Address Loc = Slot.getAddress(); |
1969 | Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty); |
1970 | CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false); |
1971 | |
1972 | // Tell the AggExprEmitter that the slot is known zero. |
1973 | Slot.setZeroed(); |
1974 | } |
1975 | |
1976 | |
1977 | |
1978 | |
1979 | /// EmitAggExpr - Emit the computation of the specified expression of aggregate |
1980 | /// type. The result is computed into DestPtr. Note that if DestPtr is null, |
1981 | /// the value of the aggregate expression is not needed. If VolatileDest is |
1982 | /// true, DestPtr cannot be 0. |
1983 | void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) { |
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");
1988 | |
1989 | // Optimize the slot if possible. |
1990 | CheckAggExprForMemSetUse(Slot, E, *this); |
1991 | |
1992 | AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E)); |
1993 | } |
1994 | |
1995 | LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) { |
1996 | |
---|