//===-- MemoryOpRemark.cpp - Auto-init remark analysis---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the analysis for the "auto-init" remark.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include <optional>

using namespace llvm;
using namespace llvm::ore;

MemoryOpRemark::~MemoryOpRemark() = default;

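// Return true if we know how to describe the memory operation performed by
// \p I: a store, a supported memory intrinsic, or a call to a known mem*
// library function.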
bool MemoryOpRemark::canHandle(const Instruction *I,
                               const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;

  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::memcpy_inline:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }

  if (auto *CI = dyn_cast<CallInst>(I)) {
    auto *CF = CI->getCalledFunction();
    if (!CF)
      return false;

    if (!CF->hasName())
      return false;

    LibFunc LF;
    bool KnownLibCall = TLI.getLibFunc(*CF, LF) && TLI.has(LF);
    if (!KnownLibCall)
      return false;

    switch (LF) {
    case LibFunc_memcpy_chk:
    case LibFunc_mempcpy_chk:
    case LibFunc_memset_chk:
    case LibFunc_memmove_chk:
    case LibFunc_memcpy:
    case LibFunc_mempcpy:
    case LibFunc_memset:
    case LibFunc_memmove:
    case LibFunc_bzero:
    case LibFunc_bcopy:
      return true;
    default:
      return false;
    }
  }

  return false;
}

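// Dispatch to the visitor that can describe this instruction best.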
void MemoryOpRemark::visit(const Instruction *I) {
  // For some of them, we can provide more information:

  // For stores:
  // * size
  // * volatile / atomic
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    visitStore(*SI);
    return;
  }

  // For intrinsics:
  // * user-friendly name
  // * size
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    visitIntrinsicCall(*II);
    return;
  }

  // For calls:
  // * known/unknown function (e.g. the compiler knows bzero, but it doesn't
  //   know my_bzero)
  // * memory operation size
  if (auto *CI = dyn_cast<CallInst>(I)) {
    visitCall(*CI);
    return;
  }

  visitUnknown(*I);
}

std::string MemoryOpRemark::explainSource(StringRef Type) const {
  return (Type + ".").str();
}

StringRef MemoryOpRemark::remarkName(RemarkKind RK) const {
  switch (RK) {
  case RK_Store:
    return "MemoryOpStore";
  case RK_Unknown:
    return "MemoryOpUnknown";
  case RK_IntrinsicCall:
    return "MemoryOpIntrinsicCall";
  case RK_Call:
    return "MemoryOpCall";
  }
  llvm_unreachable("missing RemarkKind case");
}

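// Add the inline/volatile/atomic properties of the memory operation to \p R.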
static void inlineVolatileOrAtomicWithExtraArgs(bool *Inline, bool Volatile,
                                                bool Atomic,
                                                DiagnosticInfoIROptimization &R) {
  if (Inline && *Inline)
    R << " Inlined: " << NV("StoreInlined", true) << ".";
  if (Volatile)
    R << " Volatile: " << NV("StoreVolatile", true) << ".";
  if (Atomic)
    R << " Atomic: " << NV("StoreAtomic", true) << ".";
  // Emit the false cases under ExtraArgs. This won't show them in the remark
  // message but will end up in the serialized remarks.
  if ((Inline && !*Inline) || !Volatile || !Atomic)
    R << setExtraArgs();
  if (Inline && !*Inline)
    R << " Inlined: " << NV("StoreInlined", false) << ".";
  if (!Volatile)
    R << " Volatile: " << NV("StoreVolatile", false) << ".";
  if (!Atomic)
    R << " Atomic: " << NV("StoreAtomic", false) << ".";
}

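// Convert a size expressed in bits into bytes, when it is a multiple of 8.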
static std::optional<uint64_t>
getSizeInBytes(std::optional<uint64_t> SizeInBits) {
  if (!SizeInBits || *SizeInBits % 8 != 0)
    return std::nullopt;
  return *SizeInBits / 8;
}

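// Create a remark of the diagnostic kind selected by the subclass through
// diagnosticKind(): an analysis remark or a missed-optimization remark.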
template <typename... Ts>
std::unique_ptr<DiagnosticInfoIROptimization>
MemoryOpRemark::makeRemark(Ts... Args) {
  switch (diagnosticKind()) {
  case DK_OptimizationRemarkAnalysis:
    return std::make_unique<OptimizationRemarkAnalysis>(Args...);
  case DK_OptimizationRemarkMissed:
    return std::make_unique<OptimizationRemarkMissed>(Args...);
  default:
    llvm_unreachable("unexpected DiagnosticKind");
  }
}

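// Describe a store: its size in bytes, the variables it writes to, and its
// volatile/atomic properties.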
void MemoryOpRemark::visitStore(const StoreInst &SI) {
  bool Volatile = SI.isVolatile();
  bool Atomic = SI.isAtomic();
  int64_t Size = DL.getTypeStoreSize(SI.getOperand(0)->getType());

  auto R = makeRemark(RemarkPass.data(), remarkName(RK_Store), &SI);
  *R << explainSource("Store") << "\nStore size: " << NV("StoreSize", Size)
     << " bytes.";
  visitPtr(SI.getOperand(1), /*IsRead=*/false, *R);
  inlineVolatileOrAtomicWithExtraArgs(nullptr, Volatile, Atomic, *R);
  ORE.emit(*R);
}

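// Fallback for instructions we cannot say anything more specific about.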
void MemoryOpRemark::visitUnknown(const Instruction &I) {
  auto R = makeRemark(RemarkPass.data(), remarkName(RK_Unknown), &I);
  *R << explainSource("Initialization");
  ORE.emit(*R);
}

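// Describe a memory intrinsic: the libc-style name of the operation, its size
// operand, the pointers it reads and writes, and its inline/volatile/atomic
// properties.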
void MemoryOpRemark::visitIntrinsicCall(const IntrinsicInst &II) {
  SmallString<32> CallTo;
  bool Atomic = false;
  bool Inline = false;
  switch (II.getIntrinsicID()) {
  case Intrinsic::memcpy_inline:
    CallTo = "memcpy";
    Inline = true;
    break;
  case Intrinsic::memcpy:
    CallTo = "memcpy";
    break;
  case Intrinsic::memmove:
    CallTo = "memmove";
    break;
  case Intrinsic::memset:
    CallTo = "memset";
    break;
  case Intrinsic::memcpy_element_unordered_atomic:
    CallTo = "memcpy";
    Atomic = true;
    break;
  case Intrinsic::memmove_element_unordered_atomic:
    CallTo = "memmove";
    Atomic = true;
    break;
  case Intrinsic::memset_element_unordered_atomic:
    CallTo = "memset";
    Atomic = true;
    break;
  default:
    return visitUnknown(II);
  }

  auto R = makeRemark(RemarkPass.data(), remarkName(RK_IntrinsicCall), &II);
  visitCallee(CallTo.str(), /*KnownLibCall=*/true, *R);
  visitSizeOperand(II.getOperand(2), *R);

  auto *CIVolatile = dyn_cast<ConstantInt>(II.getOperand(3));
  // No such thing as a memory intrinsic that is both atomic and volatile.
  bool Volatile = !Atomic && CIVolatile && CIVolatile->getZExtValue();
  switch (II.getIntrinsicID()) {
  case Intrinsic::memcpy_inline:
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memcpy_element_unordered_atomic:
    visitPtr(II.getOperand(1), /*IsRead=*/true, *R);
    visitPtr(II.getOperand(0), /*IsRead=*/false, *R);
    break;
  case Intrinsic::memset:
  case Intrinsic::memset_element_unordered_atomic:
    visitPtr(II.getOperand(0), /*IsRead=*/false, *R);
    break;
  default:
    break;
  }
  inlineVolatileOrAtomicWithExtraArgs(&Inline, Volatile, Atomic, *R);
  ORE.emit(*R);
}

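// Describe a call: whether the callee is a known library function and, for
// recognized mem* functions, its size and pointer operands.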
void MemoryOpRemark::visitCall(const CallInst &CI) {
  Function *F = CI.getCalledFunction();
  if (!F)
    return visitUnknown(CI);

  LibFunc LF;
  bool KnownLibCall = TLI.getLibFunc(*F, LF) && TLI.has(LF);
  auto R = makeRemark(RemarkPass.data(), remarkName(RK_Call), &CI);
  visitCallee(F, KnownLibCall, *R);
  visitKnownLibCall(CI, LF, *R);
  ORE.emit(*R);
}

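// Name the callee in the remark, flagging it as "unknown" when it is not a
// recognized library function.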
template <typename FTy>
void MemoryOpRemark::visitCallee(FTy F, bool KnownLibCall,
                                 DiagnosticInfoIROptimization &R) {
  R << "Call to ";
  if (!KnownLibCall)
    R << NV("UnknownLibCall", "unknown") << " function ";
  R << NV("Callee", F) << explainSource("");
}

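// For recognized mem* library calls, report the size operand and the pointers
// read and written, according to each function's argument order.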
void MemoryOpRemark::visitKnownLibCall(const CallInst &CI, LibFunc LF,
                                       DiagnosticInfoIROptimization &R) {
  switch (LF) {
  default:
    return;
  case LibFunc_memset_chk:
  case LibFunc_memset:
    visitSizeOperand(CI.getOperand(2), R);
    visitPtr(CI.getOperand(0), /*IsRead=*/false, R);
    break;
  case LibFunc_bzero:
    visitSizeOperand(CI.getOperand(1), R);
    visitPtr(CI.getOperand(0), /*IsRead=*/false, R);
    break;
  case LibFunc_memcpy_chk:
  case LibFunc_mempcpy_chk:
  case LibFunc_memmove_chk:
  case LibFunc_memcpy:
  case LibFunc_mempcpy:
  case LibFunc_memmove:
  case LibFunc_bcopy:
    visitSizeOperand(CI.getOperand(2), R);
    visitPtr(CI.getOperand(1), /*IsRead=*/true, R);
    visitPtr(CI.getOperand(0), /*IsRead=*/false, R);
    break;
  }
}

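// Report the size of the memory operation when it is a compile-time constant.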
void MemoryOpRemark::visitSizeOperand(Value *V,
                                      DiagnosticInfoIROptimization &R) {
  if (auto *Len = dyn_cast<ConstantInt>(V)) {
    uint64_t Size = Len->getZExtValue();
    R << " Memory operation size: " << NV("StoreSize", Size) << " bytes.";
  }
}

static std::optional<StringRef> nameOrNone(const Value *V) {
  if (V->hasName())
    return V->getName();
  return std::nullopt;
}

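// Gather name/size information for the variable(s) behind \p V: from the
// global variable itself, from debug info declarations, or, failing that,
// from the alloca.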
void MemoryOpRemark::visitVariable(const Value *V,
                                   SmallVectorImpl<VariableInfo> &Result) {
  if (auto *GV = dyn_cast<GlobalVariable>(V)) {
    auto *Ty = GV->getValueType();
    uint64_t Size = DL.getTypeSizeInBits(Ty).getFixedValue();
    VariableInfo Var{nameOrNone(GV), Size};
    if (!Var.isEmpty())
      Result.push_back(std::move(Var));
    return;
  }

  // If we find some information in the debug info, take that.
  bool FoundDI = false;
  // Try to get an llvm.dbg.declare, which has a DILocalVariable giving us the
  // real debug info name and size of the variable.
  auto FindDI = [&](const auto *DVI) {
    if (DILocalVariable *DILV = DVI->getVariable()) {
      std::optional<uint64_t> DISize = getSizeInBytes(DILV->getSizeInBits());
      VariableInfo Var{DILV->getName(), DISize};
      if (!Var.isEmpty()) {
        Result.push_back(std::move(Var));
        FoundDI = true;
      }
    }
  };
  for_each(findDbgDeclares(const_cast<Value *>(V)), FindDI);
  for_each(findDVRDeclares(const_cast<Value *>(V)), FindDI);

  if (FoundDI) {
    assert(!Result.empty());
    return;
  }

  const auto *AI = dyn_cast<AllocaInst>(V);
  if (!AI)
    return;

  // If not, get it from the alloca.
  std::optional<TypeSize> TySize = AI->getAllocationSize(DL);
  std::optional<uint64_t> Size =
      TySize ? std::optional(TySize->getFixedValue()) : std::nullopt;
  VariableInfo Var{nameOrNone(AI), Size};
  if (!Var.isEmpty())
    Result.push_back(std::move(Var));
}

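// List the variables referenced by \p Ptr in the remark, falling back to the
// number of dereferenceable bytes when no variable can be identified.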
void MemoryOpRemark::visitPtr(Value *Ptr, bool IsRead,
                              DiagnosticInfoIROptimization &R) {
  // Find if Ptr is a known variable we can give more information on.
  SmallVector<Value *, 2> Objects;
  getUnderlyingObjectsForCodeGen(Ptr, Objects);
  SmallVector<VariableInfo, 2> VIs;
  for (const Value *V : Objects)
    visitVariable(V, VIs);

  if (VIs.empty()) {
    bool CanBeNull;
    bool CanBeFreed;
    uint64_t Size = Ptr->getPointerDereferenceableBytes(DL, CanBeNull,
                                                        CanBeFreed);
    if (!Size)
      return;
    VIs.push_back({std::nullopt, Size});
  }

  R << (IsRead ? "\n Read Variables: " : "\n Written Variables: ");
  for (unsigned i = 0; i < VIs.size(); ++i) {
    const VariableInfo &VI = VIs[i];
    assert(!VI.isEmpty() && "No extra content to display.");
    if (i != 0)
      R << ", ";
    if (VI.Name)
      R << NV(IsRead ? "RVarName" : "WVarName", *VI.Name);
    else
      R << NV(IsRead ? "RVarName" : "WVarName", "<unknown>");
    if (VI.Size)
      R << " (" << NV(IsRead ? "RVarSize" : "WVarSize", *VI.Size) << " bytes)";
  }
  R << ".";
}

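// Only instructions carrying an !annotation metadata string equal to
// "auto-init" are relevant for the auto-init remark.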
bool AutoInitRemark::canHandle(const Instruction *I) {
  if (!I->hasMetadata(LLVMContext::MD_annotation))
    return false;
  return any_of(I->getMetadata(LLVMContext::MD_annotation)->operands(),
                [](const MDOperand &Op) {
                  return isa<MDString>(Op.get()) &&
                         cast<MDString>(Op.get())->getString() == "auto-init";
                });
}

std::string AutoInitRemark::explainSource(StringRef Type) const {
  return (Type + " inserted by -ftrivial-auto-var-init.").str();
}

StringRef AutoInitRemark::remarkName(RemarkKind RK) const {
  switch (RK) {
  case RK_Store:
    return "AutoInitStore";
  case RK_Unknown:
    return "AutoInitUnknownInstruction";
  case RK_IntrinsicCall:
    return "AutoInitIntrinsicCall";
  case RK_Call:
    return "AutoInitCall";
  }
  llvm_unreachable("missing RemarkKind case");
}