//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//
12 | |
13 | #include "Interpreter.h" |
14 | #include "llvm/ADT/APInt.h" |
15 | #include "llvm/ADT/Statistic.h" |
16 | #include "llvm/CodeGen/IntrinsicLowering.h" |
17 | #include "llvm/IR/Constants.h" |
18 | #include "llvm/IR/DerivedTypes.h" |
19 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
20 | #include "llvm/IR/Instructions.h" |
21 | #include "llvm/Support/CommandLine.h" |
22 | #include "llvm/Support/Debug.h" |
23 | #include "llvm/Support/ErrorHandling.h" |
24 | #include "llvm/Support/MathExtras.h" |
25 | #include "llvm/Support/raw_ostream.h" |
26 | #include <algorithm> |
27 | #include <cmath> |
28 | using namespace llvm; |
29 | |
30 | #define DEBUG_TYPE "interpreter" |
31 | |
32 | STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed" ); |
33 | |
34 | static cl::opt<bool> PrintVolatile("interpreter-print-volatile" , cl::Hidden, |
35 | cl::desc("make the interpreter print every volatile load and store" )); |
36 | |
37 | //===----------------------------------------------------------------------===// |
38 | // Various Helper Functions |
39 | //===----------------------------------------------------------------------===// |
40 | |
41 | static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) { |
42 | SF.Values[V] = Val; |
43 | } |
44 | |
45 | //===----------------------------------------------------------------------===// |
46 | // Unary Instruction Implementations |
47 | //===----------------------------------------------------------------------===// |
48 | |
49 | static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) { |
50 | switch (Ty->getTypeID()) { |
51 | case Type::FloatTyID: |
52 | Dest.FloatVal = -Src.FloatVal; |
53 | break; |
54 | case Type::DoubleTyID: |
55 | Dest.DoubleVal = -Src.DoubleVal; |
56 | break; |
57 | default: |
58 | llvm_unreachable("Unhandled type for FNeg instruction" ); |
59 | } |
60 | } |
61 | |
62 | void Interpreter::visitUnaryOperator(UnaryOperator &I) { |
63 | ExecutionContext &SF = ECStack.back(); |
64 | Type *Ty = I.getOperand(i_nocapture: 0)->getType(); |
65 | GenericValue Src = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
66 | GenericValue R; // Result |
67 | |
68 | // First process vector operation |
69 | if (Ty->isVectorTy()) { |
70 | R.AggregateVal.resize(new_size: Src.AggregateVal.size()); |
71 | |
72 | switch(I.getOpcode()) { |
73 | default: |
74 | llvm_unreachable("Don't know how to handle this unary operator" ); |
75 | break; |
76 | case Instruction::FNeg: |
77 | if (cast<VectorType>(Val: Ty)->getElementType()->isFloatTy()) { |
78 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) |
79 | R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal; |
80 | } else if (cast<VectorType>(Val: Ty)->getElementType()->isDoubleTy()) { |
81 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) |
82 | R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal; |
83 | } else { |
84 | llvm_unreachable("Unhandled type for FNeg instruction" ); |
85 | } |
86 | break; |
87 | } |
88 | } else { |
89 | switch (I.getOpcode()) { |
90 | default: |
91 | llvm_unreachable("Don't know how to handle this unary operator" ); |
92 | break; |
93 | case Instruction::FNeg: executeFNegInst(Dest&: R, Src, Ty); break; |
94 | } |
95 | } |
96 | SetValue(V: &I, Val: R, SF); |
97 | } |
98 | |
99 | //===----------------------------------------------------------------------===// |
100 | // Binary Instruction Implementations |
101 | //===----------------------------------------------------------------------===// |
102 | |
103 | #define IMPLEMENT_BINARY_OPERATOR(OP, TY) \ |
104 | case Type::TY##TyID: \ |
105 | Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \ |
106 | break |
107 | |
108 | static void executeFAddInst(GenericValue &Dest, GenericValue Src1, |
109 | GenericValue Src2, Type *Ty) { |
110 | switch (Ty->getTypeID()) { |
111 | IMPLEMENT_BINARY_OPERATOR(+, Float); |
112 | IMPLEMENT_BINARY_OPERATOR(+, Double); |
113 | default: |
114 | dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n" ; |
115 | llvm_unreachable(nullptr); |
116 | } |
117 | } |
118 | |
119 | static void executeFSubInst(GenericValue &Dest, GenericValue Src1, |
120 | GenericValue Src2, Type *Ty) { |
121 | switch (Ty->getTypeID()) { |
122 | IMPLEMENT_BINARY_OPERATOR(-, Float); |
123 | IMPLEMENT_BINARY_OPERATOR(-, Double); |
124 | default: |
125 | dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n" ; |
126 | llvm_unreachable(nullptr); |
127 | } |
128 | } |
129 | |
130 | static void executeFMulInst(GenericValue &Dest, GenericValue Src1, |
131 | GenericValue Src2, Type *Ty) { |
132 | switch (Ty->getTypeID()) { |
133 | IMPLEMENT_BINARY_OPERATOR(*, Float); |
134 | IMPLEMENT_BINARY_OPERATOR(*, Double); |
135 | default: |
136 | dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n" ; |
137 | llvm_unreachable(nullptr); |
138 | } |
139 | } |
140 | |
141 | static void executeFDivInst(GenericValue &Dest, GenericValue Src1, |
142 | GenericValue Src2, Type *Ty) { |
143 | switch (Ty->getTypeID()) { |
144 | IMPLEMENT_BINARY_OPERATOR(/, Float); |
145 | IMPLEMENT_BINARY_OPERATOR(/, Double); |
146 | default: |
147 | dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n" ; |
148 | llvm_unreachable(nullptr); |
149 | } |
150 | } |
151 | |
152 | static void executeFRemInst(GenericValue &Dest, GenericValue Src1, |
153 | GenericValue Src2, Type *Ty) { |
154 | switch (Ty->getTypeID()) { |
155 | case Type::FloatTyID: |
156 | Dest.FloatVal = fmod(x: Src1.FloatVal, y: Src2.FloatVal); |
157 | break; |
158 | case Type::DoubleTyID: |
159 | Dest.DoubleVal = fmod(x: Src1.DoubleVal, y: Src2.DoubleVal); |
160 | break; |
161 | default: |
162 | dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n" ; |
163 | llvm_unreachable(nullptr); |
164 | } |
165 | } |
166 | |
167 | #define IMPLEMENT_INTEGER_ICMP(OP, TY) \ |
168 | case Type::IntegerTyID: \ |
169 | Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \ |
170 | break; |
171 | |
172 | #define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \ |
173 | case Type::FixedVectorTyID: \ |
174 | case Type::ScalableVectorTyID: { \ |
175 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \ |
176 | Dest.AggregateVal.resize(Src1.AggregateVal.size()); \ |
177 | for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \ |
178 | Dest.AggregateVal[_i].IntVal = APInt( \ |
179 | 1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal)); \ |
180 | } break; |
181 | |
182 | // Handle pointers specially because they must be compared with only as much |
183 | // width as the host has. We _do not_ want to be comparing 64 bit values when |
184 | // running on a 32-bit target, otherwise the upper 32 bits might mess up |
185 | // comparisons if they contain garbage. |
186 | #define IMPLEMENT_POINTER_ICMP(OP) \ |
187 | case Type::PointerTyID: \ |
188 | Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \ |
189 | (void*)(intptr_t)Src2.PointerVal); \ |
190 | break; |
191 | |
192 | static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2, |
193 | Type *Ty) { |
194 | GenericValue Dest; |
195 | switch (Ty->getTypeID()) { |
196 | IMPLEMENT_INTEGER_ICMP(eq,Ty); |
197 | IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty); |
198 | IMPLEMENT_POINTER_ICMP(==); |
199 | default: |
200 | dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n" ; |
201 | llvm_unreachable(nullptr); |
202 | } |
203 | return Dest; |
204 | } |
205 | |
206 | static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2, |
207 | Type *Ty) { |
208 | GenericValue Dest; |
209 | switch (Ty->getTypeID()) { |
210 | IMPLEMENT_INTEGER_ICMP(ne,Ty); |
211 | IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty); |
212 | IMPLEMENT_POINTER_ICMP(!=); |
213 | default: |
214 | dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n" ; |
215 | llvm_unreachable(nullptr); |
216 | } |
217 | return Dest; |
218 | } |
219 | |
220 | static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2, |
221 | Type *Ty) { |
222 | GenericValue Dest; |
223 | switch (Ty->getTypeID()) { |
224 | IMPLEMENT_INTEGER_ICMP(ult,Ty); |
225 | IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty); |
226 | IMPLEMENT_POINTER_ICMP(<); |
227 | default: |
228 | dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n" ; |
229 | llvm_unreachable(nullptr); |
230 | } |
231 | return Dest; |
232 | } |
233 | |
234 | static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2, |
235 | Type *Ty) { |
236 | GenericValue Dest; |
237 | switch (Ty->getTypeID()) { |
238 | IMPLEMENT_INTEGER_ICMP(slt,Ty); |
239 | IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty); |
240 | IMPLEMENT_POINTER_ICMP(<); |
241 | default: |
242 | dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n" ; |
243 | llvm_unreachable(nullptr); |
244 | } |
245 | return Dest; |
246 | } |
247 | |
248 | static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2, |
249 | Type *Ty) { |
250 | GenericValue Dest; |
251 | switch (Ty->getTypeID()) { |
252 | IMPLEMENT_INTEGER_ICMP(ugt,Ty); |
253 | IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty); |
254 | IMPLEMENT_POINTER_ICMP(>); |
255 | default: |
256 | dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n" ; |
257 | llvm_unreachable(nullptr); |
258 | } |
259 | return Dest; |
260 | } |
261 | |
262 | static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2, |
263 | Type *Ty) { |
264 | GenericValue Dest; |
265 | switch (Ty->getTypeID()) { |
266 | IMPLEMENT_INTEGER_ICMP(sgt,Ty); |
267 | IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty); |
268 | IMPLEMENT_POINTER_ICMP(>); |
269 | default: |
270 | dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n" ; |
271 | llvm_unreachable(nullptr); |
272 | } |
273 | return Dest; |
274 | } |
275 | |
276 | static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2, |
277 | Type *Ty) { |
278 | GenericValue Dest; |
279 | switch (Ty->getTypeID()) { |
280 | IMPLEMENT_INTEGER_ICMP(ule,Ty); |
281 | IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty); |
282 | IMPLEMENT_POINTER_ICMP(<=); |
283 | default: |
284 | dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n" ; |
285 | llvm_unreachable(nullptr); |
286 | } |
287 | return Dest; |
288 | } |
289 | |
290 | static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2, |
291 | Type *Ty) { |
292 | GenericValue Dest; |
293 | switch (Ty->getTypeID()) { |
294 | IMPLEMENT_INTEGER_ICMP(sle,Ty); |
295 | IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty); |
296 | IMPLEMENT_POINTER_ICMP(<=); |
297 | default: |
298 | dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n" ; |
299 | llvm_unreachable(nullptr); |
300 | } |
301 | return Dest; |
302 | } |
303 | |
304 | static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2, |
305 | Type *Ty) { |
306 | GenericValue Dest; |
307 | switch (Ty->getTypeID()) { |
308 | IMPLEMENT_INTEGER_ICMP(uge,Ty); |
309 | IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty); |
310 | IMPLEMENT_POINTER_ICMP(>=); |
311 | default: |
312 | dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n" ; |
313 | llvm_unreachable(nullptr); |
314 | } |
315 | return Dest; |
316 | } |
317 | |
318 | static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2, |
319 | Type *Ty) { |
320 | GenericValue Dest; |
321 | switch (Ty->getTypeID()) { |
322 | IMPLEMENT_INTEGER_ICMP(sge,Ty); |
323 | IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty); |
324 | IMPLEMENT_POINTER_ICMP(>=); |
325 | default: |
326 | dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n" ; |
327 | llvm_unreachable(nullptr); |
328 | } |
329 | return Dest; |
330 | } |
331 | |
332 | void Interpreter::visitICmpInst(ICmpInst &I) { |
333 | ExecutionContext &SF = ECStack.back(); |
334 | Type *Ty = I.getOperand(i_nocapture: 0)->getType(); |
335 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
336 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
337 | GenericValue R; // Result |
338 | |
339 | switch (I.getPredicate()) { |
340 | case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break; |
341 | case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break; |
342 | case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break; |
343 | case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break; |
344 | case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break; |
345 | case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break; |
346 | case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break; |
347 | case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break; |
348 | case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break; |
349 | case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break; |
350 | default: |
351 | dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I; |
352 | llvm_unreachable(nullptr); |
353 | } |
354 | |
355 | SetValue(V: &I, Val: R, SF); |
356 | } |
357 | |
358 | #define IMPLEMENT_FCMP(OP, TY) \ |
359 | case Type::TY##TyID: \ |
360 | Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \ |
361 | break |
362 | |
363 | #define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \ |
364 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \ |
365 | Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \ |
366 | for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \ |
367 | Dest.AggregateVal[_i].IntVal = APInt(1, \ |
368 | Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\ |
369 | break; |
370 | |
371 | #define IMPLEMENT_VECTOR_FCMP(OP) \ |
372 | case Type::FixedVectorTyID: \ |
373 | case Type::ScalableVectorTyID: \ |
374 | if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \ |
375 | IMPLEMENT_VECTOR_FCMP_T(OP, Float); \ |
376 | } else { \ |
377 | IMPLEMENT_VECTOR_FCMP_T(OP, Double); \ |
378 | } |
379 | |
380 | static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2, |
381 | Type *Ty) { |
382 | GenericValue Dest; |
383 | switch (Ty->getTypeID()) { |
384 | IMPLEMENT_FCMP(==, Float); |
385 | IMPLEMENT_FCMP(==, Double); |
386 | IMPLEMENT_VECTOR_FCMP(==); |
387 | default: |
388 | dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n" ; |
389 | llvm_unreachable(nullptr); |
390 | } |
391 | return Dest; |
392 | } |
393 | |
394 | #define IMPLEMENT_SCALAR_NANS(TY, X,Y) \ |
395 | if (TY->isFloatTy()) { \ |
396 | if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \ |
397 | Dest.IntVal = APInt(1,false); \ |
398 | return Dest; \ |
399 | } \ |
400 | } else { \ |
401 | if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \ |
402 | Dest.IntVal = APInt(1,false); \ |
403 | return Dest; \ |
404 | } \ |
405 | } |
406 | |
407 | #define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \ |
408 | assert(X.AggregateVal.size() == Y.AggregateVal.size()); \ |
409 | Dest.AggregateVal.resize( X.AggregateVal.size() ); \ |
410 | for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \ |
411 | if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \ |
412 | Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \ |
413 | Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \ |
414 | else { \ |
415 | Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \ |
416 | } \ |
417 | } |
418 | |
419 | #define MASK_VECTOR_NANS(TY, X,Y, FLAG) \ |
420 | if (TY->isVectorTy()) { \ |
421 | if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \ |
422 | MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \ |
423 | } else { \ |
424 | MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \ |
425 | } \ |
426 | } \ |
427 | |
428 | |
429 | |
430 | static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2, |
431 | Type *Ty) |
432 | { |
433 | GenericValue Dest; |
434 | // if input is scalar value and Src1 or Src2 is NaN return false |
435 | IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2) |
436 | // if vector input detect NaNs and fill mask |
437 | MASK_VECTOR_NANS(Ty, Src1, Src2, false) |
438 | GenericValue DestMask = Dest; |
439 | switch (Ty->getTypeID()) { |
440 | IMPLEMENT_FCMP(!=, Float); |
441 | IMPLEMENT_FCMP(!=, Double); |
442 | IMPLEMENT_VECTOR_FCMP(!=); |
443 | default: |
444 | dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n" ; |
445 | llvm_unreachable(nullptr); |
446 | } |
447 | // in vector case mask out NaN elements |
448 | if (Ty->isVectorTy()) |
449 | for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) |
450 | if (DestMask.AggregateVal[_i].IntVal == false) |
451 | Dest.AggregateVal[_i].IntVal = APInt(1,false); |
452 | |
453 | return Dest; |
454 | } |
455 | |
456 | static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2, |
457 | Type *Ty) { |
458 | GenericValue Dest; |
459 | switch (Ty->getTypeID()) { |
460 | IMPLEMENT_FCMP(<=, Float); |
461 | IMPLEMENT_FCMP(<=, Double); |
462 | IMPLEMENT_VECTOR_FCMP(<=); |
463 | default: |
464 | dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n" ; |
465 | llvm_unreachable(nullptr); |
466 | } |
467 | return Dest; |
468 | } |
469 | |
470 | static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2, |
471 | Type *Ty) { |
472 | GenericValue Dest; |
473 | switch (Ty->getTypeID()) { |
474 | IMPLEMENT_FCMP(>=, Float); |
475 | IMPLEMENT_FCMP(>=, Double); |
476 | IMPLEMENT_VECTOR_FCMP(>=); |
477 | default: |
478 | dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n" ; |
479 | llvm_unreachable(nullptr); |
480 | } |
481 | return Dest; |
482 | } |
483 | |
484 | static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2, |
485 | Type *Ty) { |
486 | GenericValue Dest; |
487 | switch (Ty->getTypeID()) { |
488 | IMPLEMENT_FCMP(<, Float); |
489 | IMPLEMENT_FCMP(<, Double); |
490 | IMPLEMENT_VECTOR_FCMP(<); |
491 | default: |
492 | dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n" ; |
493 | llvm_unreachable(nullptr); |
494 | } |
495 | return Dest; |
496 | } |
497 | |
498 | static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2, |
499 | Type *Ty) { |
500 | GenericValue Dest; |
501 | switch (Ty->getTypeID()) { |
502 | IMPLEMENT_FCMP(>, Float); |
503 | IMPLEMENT_FCMP(>, Double); |
504 | IMPLEMENT_VECTOR_FCMP(>); |
505 | default: |
506 | dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n" ; |
507 | llvm_unreachable(nullptr); |
508 | } |
509 | return Dest; |
510 | } |
511 | |
512 | #define IMPLEMENT_UNORDERED(TY, X,Y) \ |
513 | if (TY->isFloatTy()) { \ |
514 | if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \ |
515 | Dest.IntVal = APInt(1,true); \ |
516 | return Dest; \ |
517 | } \ |
518 | } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \ |
519 | Dest.IntVal = APInt(1,true); \ |
520 | return Dest; \ |
521 | } |
522 | |
523 | #define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \ |
524 | if (TY->isVectorTy()) { \ |
525 | GenericValue DestMask = Dest; \ |
526 | Dest = FUNC(Src1, Src2, Ty); \ |
527 | for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \ |
528 | if (DestMask.AggregateVal[_i].IntVal == true) \ |
529 | Dest.AggregateVal[_i].IntVal = APInt(1, true); \ |
530 | return Dest; \ |
531 | } |
532 | |
533 | static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2, |
534 | Type *Ty) { |
535 | GenericValue Dest; |
536 | IMPLEMENT_UNORDERED(Ty, Src1, Src2) |
537 | MASK_VECTOR_NANS(Ty, Src1, Src2, true) |
538 | IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ) |
539 | return executeFCMP_OEQ(Src1, Src2, Ty); |
540 | |
541 | } |
542 | |
543 | static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2, |
544 | Type *Ty) { |
545 | GenericValue Dest; |
546 | IMPLEMENT_UNORDERED(Ty, Src1, Src2) |
547 | MASK_VECTOR_NANS(Ty, Src1, Src2, true) |
548 | IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE) |
549 | return executeFCMP_ONE(Src1, Src2, Ty); |
550 | } |
551 | |
552 | static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2, |
553 | Type *Ty) { |
554 | GenericValue Dest; |
555 | IMPLEMENT_UNORDERED(Ty, Src1, Src2) |
556 | MASK_VECTOR_NANS(Ty, Src1, Src2, true) |
557 | IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE) |
558 | return executeFCMP_OLE(Src1, Src2, Ty); |
559 | } |
560 | |
561 | static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2, |
562 | Type *Ty) { |
563 | GenericValue Dest; |
564 | IMPLEMENT_UNORDERED(Ty, Src1, Src2) |
565 | MASK_VECTOR_NANS(Ty, Src1, Src2, true) |
566 | IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE) |
567 | return executeFCMP_OGE(Src1, Src2, Ty); |
568 | } |
569 | |
570 | static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2, |
571 | Type *Ty) { |
572 | GenericValue Dest; |
573 | IMPLEMENT_UNORDERED(Ty, Src1, Src2) |
574 | MASK_VECTOR_NANS(Ty, Src1, Src2, true) |
575 | IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT) |
576 | return executeFCMP_OLT(Src1, Src2, Ty); |
577 | } |
578 | |
579 | static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2, |
580 | Type *Ty) { |
581 | GenericValue Dest; |
582 | IMPLEMENT_UNORDERED(Ty, Src1, Src2) |
583 | MASK_VECTOR_NANS(Ty, Src1, Src2, true) |
584 | IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT) |
585 | return executeFCMP_OGT(Src1, Src2, Ty); |
586 | } |
587 | |
588 | static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2, |
589 | Type *Ty) { |
590 | GenericValue Dest; |
591 | if(Ty->isVectorTy()) { |
592 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); |
593 | Dest.AggregateVal.resize( new_size: Src1.AggregateVal.size() ); |
594 | if (cast<VectorType>(Val: Ty)->getElementType()->isFloatTy()) { |
595 | for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) |
596 | Dest.AggregateVal[_i].IntVal = APInt(1, |
597 | ( (Src1.AggregateVal[_i].FloatVal == |
598 | Src1.AggregateVal[_i].FloatVal) && |
599 | (Src2.AggregateVal[_i].FloatVal == |
600 | Src2.AggregateVal[_i].FloatVal))); |
601 | } else { |
602 | for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) |
603 | Dest.AggregateVal[_i].IntVal = APInt(1, |
604 | ( (Src1.AggregateVal[_i].DoubleVal == |
605 | Src1.AggregateVal[_i].DoubleVal) && |
606 | (Src2.AggregateVal[_i].DoubleVal == |
607 | Src2.AggregateVal[_i].DoubleVal))); |
608 | } |
609 | } else if (Ty->isFloatTy()) |
610 | Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal && |
611 | Src2.FloatVal == Src2.FloatVal)); |
612 | else { |
613 | Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal && |
614 | Src2.DoubleVal == Src2.DoubleVal)); |
615 | } |
616 | return Dest; |
617 | } |
618 | |
619 | static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2, |
620 | Type *Ty) { |
621 | GenericValue Dest; |
622 | if(Ty->isVectorTy()) { |
623 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); |
624 | Dest.AggregateVal.resize( new_size: Src1.AggregateVal.size() ); |
625 | if (cast<VectorType>(Val: Ty)->getElementType()->isFloatTy()) { |
626 | for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) |
627 | Dest.AggregateVal[_i].IntVal = APInt(1, |
628 | ( (Src1.AggregateVal[_i].FloatVal != |
629 | Src1.AggregateVal[_i].FloatVal) || |
630 | (Src2.AggregateVal[_i].FloatVal != |
631 | Src2.AggregateVal[_i].FloatVal))); |
632 | } else { |
633 | for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) |
634 | Dest.AggregateVal[_i].IntVal = APInt(1, |
635 | ( (Src1.AggregateVal[_i].DoubleVal != |
636 | Src1.AggregateVal[_i].DoubleVal) || |
637 | (Src2.AggregateVal[_i].DoubleVal != |
638 | Src2.AggregateVal[_i].DoubleVal))); |
639 | } |
640 | } else if (Ty->isFloatTy()) |
641 | Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal || |
642 | Src2.FloatVal != Src2.FloatVal)); |
643 | else { |
644 | Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal || |
645 | Src2.DoubleVal != Src2.DoubleVal)); |
646 | } |
647 | return Dest; |
648 | } |
649 | |
650 | static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2, |
651 | Type *Ty, const bool val) { |
652 | GenericValue Dest; |
653 | if(Ty->isVectorTy()) { |
654 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); |
655 | Dest.AggregateVal.resize( new_size: Src1.AggregateVal.size() ); |
656 | for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) |
657 | Dest.AggregateVal[_i].IntVal = APInt(1,val); |
658 | } else { |
659 | Dest.IntVal = APInt(1, val); |
660 | } |
661 | |
662 | return Dest; |
663 | } |
664 | |
665 | void Interpreter::visitFCmpInst(FCmpInst &I) { |
666 | ExecutionContext &SF = ECStack.back(); |
667 | Type *Ty = I.getOperand(i_nocapture: 0)->getType(); |
668 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
669 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
670 | GenericValue R; // Result |
671 | |
672 | switch (I.getPredicate()) { |
673 | default: |
674 | dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I; |
675 | llvm_unreachable(nullptr); |
676 | break; |
677 | case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, val: false); |
678 | break; |
679 | case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, val: true); |
680 | break; |
681 | case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break; |
682 | case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break; |
683 | case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break; |
684 | case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break; |
685 | case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break; |
686 | case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break; |
687 | case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break; |
688 | case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break; |
689 | case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break; |
690 | case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break; |
691 | case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break; |
692 | case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break; |
693 | case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break; |
694 | case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break; |
695 | } |
696 | |
697 | SetValue(V: &I, Val: R, SF); |
698 | } |
699 | |
700 | static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1, |
701 | GenericValue Src2, Type *Ty) { |
702 | GenericValue Result; |
703 | switch (predicate) { |
704 | case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty); |
705 | case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty); |
706 | case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty); |
707 | case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty); |
708 | case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty); |
709 | case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty); |
710 | case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty); |
711 | case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty); |
712 | case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty); |
713 | case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty); |
714 | case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty); |
715 | case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty); |
716 | case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty); |
717 | case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty); |
718 | case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty); |
719 | case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty); |
720 | case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty); |
721 | case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty); |
722 | case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty); |
723 | case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty); |
724 | case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty); |
725 | case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty); |
726 | case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty); |
727 | case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty); |
728 | case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, val: false); |
729 | case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, val: true); |
730 | default: |
731 | dbgs() << "Unhandled Cmp predicate\n" ; |
732 | llvm_unreachable(nullptr); |
733 | } |
734 | } |
735 | |
736 | void Interpreter::visitBinaryOperator(BinaryOperator &I) { |
737 | ExecutionContext &SF = ECStack.back(); |
738 | Type *Ty = I.getOperand(i_nocapture: 0)->getType(); |
739 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
740 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
741 | GenericValue R; // Result |
742 | |
743 | // First process vector operation |
744 | if (Ty->isVectorTy()) { |
745 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); |
746 | R.AggregateVal.resize(new_size: Src1.AggregateVal.size()); |
747 | |
748 | // Macros to execute binary operation 'OP' over integer vectors |
749 | #define INTEGER_VECTOR_OPERATION(OP) \ |
750 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \ |
751 | R.AggregateVal[i].IntVal = \ |
752 | Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal; |
753 | |
754 | // Additional macros to execute binary operations udiv/sdiv/urem/srem since |
755 | // they have different notation. |
756 | #define INTEGER_VECTOR_FUNCTION(OP) \ |
757 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \ |
758 | R.AggregateVal[i].IntVal = \ |
759 | Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal); |
760 | |
761 | // Macros to execute binary operation 'OP' over floating point type TY |
762 | // (float or double) vectors |
763 | #define FLOAT_VECTOR_FUNCTION(OP, TY) \ |
764 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \ |
765 | R.AggregateVal[i].TY = \ |
766 | Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY; |
767 | |
768 | // Macros to choose appropriate TY: float or double and run operation |
769 | // execution |
770 | #define FLOAT_VECTOR_OP(OP) { \ |
771 | if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \ |
772 | FLOAT_VECTOR_FUNCTION(OP, FloatVal) \ |
773 | else { \ |
774 | if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \ |
775 | FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \ |
776 | else { \ |
777 | dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \ |
778 | llvm_unreachable(0); \ |
779 | } \ |
780 | } \ |
781 | } |
782 | |
783 | switch(I.getOpcode()){ |
784 | default: |
785 | dbgs() << "Don't know how to handle this binary operator!\n-->" << I; |
786 | llvm_unreachable(nullptr); |
787 | break; |
788 | case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break; |
789 | case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break; |
790 | case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break; |
791 | case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break; |
792 | case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break; |
793 | case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break; |
794 | case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break; |
795 | case Instruction::And: INTEGER_VECTOR_OPERATION(&) break; |
796 | case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break; |
797 | case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break; |
798 | case Instruction::FAdd: FLOAT_VECTOR_OP(+) break; |
799 | case Instruction::FSub: FLOAT_VECTOR_OP(-) break; |
800 | case Instruction::FMul: FLOAT_VECTOR_OP(*) break; |
801 | case Instruction::FDiv: FLOAT_VECTOR_OP(/) break; |
802 | case Instruction::FRem: |
803 | if (cast<VectorType>(Val: Ty)->getElementType()->isFloatTy()) |
804 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) |
805 | R.AggregateVal[i].FloatVal = |
806 | fmod(x: Src1.AggregateVal[i].FloatVal, y: Src2.AggregateVal[i].FloatVal); |
807 | else { |
808 | if (cast<VectorType>(Val: Ty)->getElementType()->isDoubleTy()) |
809 | for (unsigned i = 0; i < R.AggregateVal.size(); ++i) |
810 | R.AggregateVal[i].DoubleVal = |
811 | fmod(x: Src1.AggregateVal[i].DoubleVal, y: Src2.AggregateVal[i].DoubleVal); |
812 | else { |
813 | dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n" ; |
814 | llvm_unreachable(nullptr); |
815 | } |
816 | } |
817 | break; |
818 | } |
819 | } else { |
820 | switch (I.getOpcode()) { |
821 | default: |
822 | dbgs() << "Don't know how to handle this binary operator!\n-->" << I; |
823 | llvm_unreachable(nullptr); |
824 | break; |
825 | case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break; |
826 | case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break; |
827 | case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break; |
828 | case Instruction::FAdd: executeFAddInst(Dest&: R, Src1, Src2, Ty); break; |
829 | case Instruction::FSub: executeFSubInst(Dest&: R, Src1, Src2, Ty); break; |
830 | case Instruction::FMul: executeFMulInst(Dest&: R, Src1, Src2, Ty); break; |
831 | case Instruction::FDiv: executeFDivInst(Dest&: R, Src1, Src2, Ty); break; |
832 | case Instruction::FRem: executeFRemInst(Dest&: R, Src1, Src2, Ty); break; |
833 | case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(RHS: Src2.IntVal); break; |
834 | case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(RHS: Src2.IntVal); break; |
835 | case Instruction::URem: R.IntVal = Src1.IntVal.urem(RHS: Src2.IntVal); break; |
836 | case Instruction::SRem: R.IntVal = Src1.IntVal.srem(RHS: Src2.IntVal); break; |
837 | case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break; |
838 | case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break; |
839 | case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break; |
840 | } |
841 | } |
842 | SetValue(V: &I, Val: R, SF); |
843 | } |
844 | |
845 | static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2, |
846 | GenericValue Src3, Type *Ty) { |
847 | GenericValue Dest; |
848 | if(Ty->isVectorTy()) { |
849 | assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); |
850 | assert(Src2.AggregateVal.size() == Src3.AggregateVal.size()); |
851 | Dest.AggregateVal.resize( new_size: Src1.AggregateVal.size() ); |
852 | for (size_t i = 0; i < Src1.AggregateVal.size(); ++i) |
853 | Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ? |
854 | Src3.AggregateVal[i] : Src2.AggregateVal[i]; |
855 | } else { |
856 | Dest = (Src1.IntVal == 0) ? Src3 : Src2; |
857 | } |
858 | return Dest; |
859 | } |
860 | |
861 | void Interpreter::visitSelectInst(SelectInst &I) { |
862 | ExecutionContext &SF = ECStack.back(); |
863 | Type * Ty = I.getOperand(i_nocapture: 0)->getType(); |
864 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
865 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
866 | GenericValue Src3 = getOperandValue(V: I.getOperand(i_nocapture: 2), SF); |
867 | GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty); |
868 | SetValue(V: &I, Val: R, SF); |
869 | } |
870 | |
871 | //===----------------------------------------------------------------------===// |
872 | // Terminator Instruction Implementations |
873 | //===----------------------------------------------------------------------===// |
874 | |
/// Implement a call to exit() inside the interpreted program: tear down the
/// interpreter call stack, run registered atexit handlers, then terminate
/// the host process with the given status.
void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  // The exit status is truncated/zero-extended to 32 bits before being
  // handed to the host's exit().
  exit(status: GV.IntVal.zextOrTrunc(width: 32).getZExtValue());
}
883 | |
/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) { // Finished main. Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
      ExitValue = Result; // Capture the exit value of the program
    } else {
      // Void return: zero ExitValue so no stale bytes are reported as the
      // program's exit value.
      memset(s: &ExitValue.Untyped, c: 0, n: sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (CallingSF.Caller) {
      // Save result...
      if (!CallingSF.Caller->getType()->isVoidTy())
        SetValue(V: CallingSF.Caller, Val: Result, SF&: CallingSF);
      // Returning from an invoke resumes execution at its normal
      // destination block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Val: CallingSF.Caller))
        SwitchToNewBasicBlock (Dest: II->getNormalDest (), SF&: CallingSF);
      CallingSF.Caller = nullptr; // We returned from the call...
    }
  }
}
917 | |
918 | void Interpreter::visitReturnInst(ReturnInst &I) { |
919 | ExecutionContext &SF = ECStack.back(); |
920 | Type *RetTy = Type::getVoidTy(C&: I.getContext()); |
921 | GenericValue Result; |
922 | |
923 | // Save away the return value... (if we are not 'ret void') |
924 | if (I.getNumOperands()) { |
925 | RetTy = I.getReturnValue()->getType(); |
926 | Result = getOperandValue(V: I.getReturnValue(), SF); |
927 | } |
928 | |
929 | popStackAndReturnValueToCaller(RetTy, Result); |
930 | } |
931 | |
/// Executing 'unreachable' means the interpreted program invoked undefined
/// behavior; abort the interpreter with a fatal error.
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error(reason: "Program executed an 'unreachable' instruction!" );
}
935 | |
936 | void Interpreter::visitBranchInst(BranchInst &I) { |
937 | ExecutionContext &SF = ECStack.back(); |
938 | BasicBlock *Dest; |
939 | |
940 | Dest = I.getSuccessor(i: 0); // Uncond branches have a fixed dest... |
941 | if (!I.isUnconditional()) { |
942 | Value *Cond = I.getCondition(); |
943 | if (getOperandValue(V: Cond, SF).IntVal == 0) // If false cond... |
944 | Dest = I.getSuccessor(i: 1); |
945 | } |
946 | SwitchToNewBasicBlock(Dest, SF); |
947 | } |
948 | |
949 | void Interpreter::visitSwitchInst(SwitchInst &I) { |
950 | ExecutionContext &SF = ECStack.back(); |
951 | Value* Cond = I.getCondition(); |
952 | Type *ElTy = Cond->getType(); |
953 | GenericValue CondVal = getOperandValue(V: Cond, SF); |
954 | |
955 | // Check to see if any of the cases match... |
956 | BasicBlock *Dest = nullptr; |
957 | for (auto Case : I.cases()) { |
958 | GenericValue CaseVal = getOperandValue(V: Case.getCaseValue(), SF); |
959 | if (executeICMP_EQ(Src1: CondVal, Src2: CaseVal, Ty: ElTy).IntVal != 0) { |
960 | Dest = cast<BasicBlock>(Val: Case.getCaseSuccessor()); |
961 | break; |
962 | } |
963 | } |
964 | if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default |
965 | SwitchToNewBasicBlock(Dest, SF); |
966 | } |
967 | |
968 | void Interpreter::visitIndirectBrInst(IndirectBrInst &I) { |
969 | ExecutionContext &SF = ECStack.back(); |
970 | void *Dest = GVTOP(GV: getOperandValue(V: I.getAddress(), SF)); |
971 | SwitchToNewBasicBlock(Dest: (BasicBlock*)Dest, SF); |
972 | } |
973 | |
974 | |
// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// This function handles the actual updating of block and instruction iterators
// as well as execution of all of the PHI nodes in the destination block.
//
// This method does this because all of the PHI nodes must be executed
// atomically, reading their inputs before any of the results are updated. Not
// doing this can cause problems if the PHI nodes depend on other PHI nodes for
// their inputs. If the input PHI node is updated before it is read, incorrect
// results can happen. Thus we use a two phase approach.
//
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
  SF.CurBB = Dest; // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...

  if (!isa<PHINode>(Val: SF.CurInst)) return; // Nothing fancy to do

  // Phase one: loop over all of the PHI nodes in the current block, reading
  // their inputs (selected by PrevBB) without updating anything yet.
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(Val&: SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(BB: PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??" );
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(x: getOperandValue(V: IncomingValue, SF));
  }

  // Phase two: now loop over all of the PHI nodes setting their values...
  // ResultValues[i] corresponds to the i'th PHI in the block.
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(Val: SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(Val&: SF.CurInst);
    SetValue(V: PN, Val: ResultValues[i], SF);
  }
}
1012 | |
1013 | //===----------------------------------------------------------------------===// |
1014 | // Memory Instruction Implementations |
1015 | //===----------------------------------------------------------------------===// |
1016 | |
/// Interpret an 'alloca': heap-allocate storage for the requested number of
/// elements and record it in the frame so it is freed on function exit.
void Interpreter::visitAllocaInst(AllocaInst &I) {
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getAllocatedType();  // Type to be allocated

  // Get the number of elements being allocated by the array...
  unsigned NumElements =
      getOperandValue(V: I.getOperand(i_nocapture: 0), SF).IntVal.getZExtValue();

  unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);

  // Avoid malloc-ing zero bytes, use max()...
  // NOTE(review): NumElements * TypeSize is an unsigned 32-bit multiply and
  // can wrap for very large allocations — confirm this is acceptable here.
  unsigned MemToAlloc = std::max(a: 1U, b: NumElements * TypeSize);

  // Allocate enough memory to hold the type...
  void *Memory = safe_malloc(Sz: MemToAlloc);

  LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
                    << " bytes) x " << NumElements << " (Total: " << MemToAlloc
                    << ") at " << uintptr_t(Memory) << '\n');

  // The instruction's result is the pointer to the new storage.
  GenericValue Result = PTOGV(P: Memory);
  assert(Result.PointerVal && "Null pointer returned by malloc!" );
  SetValue(V: &I, Val: Result, SF);

  // Track the allocation in the current frame so it is released when the
  // frame is popped.
  if (I.getOpcode() == Instruction::Alloca)
    ECStack.back().Allocas.add(Mem: Memory);
}
1045 | |
// getElementOffset - The workhorse for getelementptr: walk the index list,
// accumulate a byte offset, and apply it to the evaluated base pointer.
//
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!" );

  // Accumulated byte offset from the base pointer.
  uint64_t Total = 0;

  for (; I != E; ++I) {
    if (StructType *STy = I.getStructTypeOrNull()) {
      // Struct index: always a constant field number; the struct layout
      // gives the field's byte offset.
      const StructLayout *SLO = getDataLayout().getStructLayout(Ty: STy);

      const ConstantInt *CPU = cast<ConstantInt>(Val: I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Idx: Index);
    } else {
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(V: I.getOperand(), SF);

      // GEP indices are signed: sign-extend 32-bit indices to 64 bits so
      // negative offsets work correctly.
      int64_t Idx;
      unsigned BitWidth =
          cast<IntegerType>(Val: I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr" );
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      // Scale by the element stride of the indexed sequential type.
      Total += I.getSequentialElementStride(DL: getDataLayout()) * Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(V: Ptr, SF).PointerVal) + Total;
  LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n" );
  return Result;
}
1086 | |
1087 | void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) { |
1088 | ExecutionContext &SF = ECStack.back(); |
1089 | SetValue(V: &I, Val: executeGEPOperation(Ptr: I.getPointerOperand(), |
1090 | I: gep_type_begin(GEP: I), E: gep_type_end(GEP: I), SF), SF); |
1091 | } |
1092 | |
1093 | void Interpreter::visitLoadInst(LoadInst &I) { |
1094 | ExecutionContext &SF = ECStack.back(); |
1095 | GenericValue SRC = getOperandValue(V: I.getPointerOperand(), SF); |
1096 | GenericValue *Ptr = (GenericValue*)GVTOP(GV: SRC); |
1097 | GenericValue Result; |
1098 | LoadValueFromMemory(Result, Ptr, Ty: I.getType()); |
1099 | SetValue(V: &I, Val: Result, SF); |
1100 | if (I.isVolatile() && PrintVolatile) |
1101 | dbgs() << "Volatile load " << I; |
1102 | } |
1103 | |
1104 | void Interpreter::visitStoreInst(StoreInst &I) { |
1105 | ExecutionContext &SF = ECStack.back(); |
1106 | GenericValue Val = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
1107 | GenericValue SRC = getOperandValue(V: I.getPointerOperand(), SF); |
1108 | StoreValueToMemory(Val, Ptr: (GenericValue *)GVTOP(GV: SRC), |
1109 | Ty: I.getOperand(i_nocapture: 0)->getType()); |
1110 | if (I.isVolatile() && PrintVolatile) |
1111 | dbgs() << "Volatile store: " << I; |
1112 | } |
1113 | |
1114 | //===----------------------------------------------------------------------===// |
1115 | // Miscellaneous Instruction Implementations |
1116 | //===----------------------------------------------------------------------===// |
1117 | |
1118 | void Interpreter::visitVAStartInst(VAStartInst &I) { |
1119 | ExecutionContext &SF = ECStack.back(); |
1120 | GenericValue ArgIndex; |
1121 | ArgIndex.UIntPairVal.first = ECStack.size() - 1; |
1122 | ArgIndex.UIntPairVal.second = 0; |
1123 | SetValue(V: &I, Val: ArgIndex, SF); |
1124 | } |
1125 | |
/// va_end is a noop for the interpreter: its va_list representation (a pair
/// of indices) owns no resources that need releasing.
void Interpreter::visitVAEndInst(VAEndInst &I) {
  // va_end is a noop for the interpreter
}
1129 | |
1130 | void Interpreter::visitVACopyInst(VACopyInst &I) { |
1131 | ExecutionContext &SF = ECStack.back(); |
1132 | SetValue(V: &I, Val: getOperandValue(V: *I.arg_begin(), SF), SF); |
1133 | } |
1134 | |
/// Interpret an intrinsic call by lowering it to ordinary LLVM IR in place,
/// then repositioning the frame's instruction pointer at the first lowered
/// instruction so it executes next.
void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
  ExecutionContext &SF = ECStack.back();

  // If it is an unknown intrinsic function, use the intrinsic lowering
  // class to transform it into hopefully tasty LLVM code.
  //
  // Remember a position relative to I before lowering: lowering erases I
  // and may insert replacement instructions where it stood.
  BasicBlock::iterator Me(&I);
  BasicBlock *Parent = I.getParent();
  bool atBegin(Parent->begin() == Me);
  if (!atBegin)
    --Me; // Me now points at the instruction just before I, which survives.
  IL->LowerIntrinsicCall(CI: &I);

  // Restore the CurInst pointer to the first instruction newly inserted, if
  // any.
  if (atBegin) {
    SF.CurInst = Parent->begin();
  } else {
    SF.CurInst = Me;
    ++SF.CurInst; // First instruction after the saved (surviving) position.
  }
}
1157 | |
1158 | void Interpreter::visitCallBase(CallBase &I) { |
1159 | ExecutionContext &SF = ECStack.back(); |
1160 | |
1161 | SF.Caller = &I; |
1162 | std::vector<GenericValue> ArgVals; |
1163 | const unsigned NumArgs = SF.Caller->arg_size(); |
1164 | ArgVals.reserve(n: NumArgs); |
1165 | for (Value *V : SF.Caller->args()) |
1166 | ArgVals.push_back(x: getOperandValue(V, SF)); |
1167 | |
1168 | // To handle indirect calls, we must get the pointer value from the argument |
1169 | // and treat it as a function pointer. |
1170 | GenericValue SRC = getOperandValue(V: SF.Caller->getCalledOperand(), SF); |
1171 | callFunction(F: (Function*)GVTOP(GV: SRC), ArgVals); |
1172 | } |
1173 | |
1174 | // auxiliary function for shift operations |
1175 | static unsigned getShiftAmount(uint64_t orgShiftAmount, |
1176 | llvm::APInt valueToShift) { |
1177 | unsigned valueWidth = valueToShift.getBitWidth(); |
1178 | if (orgShiftAmount < (uint64_t)valueWidth) |
1179 | return orgShiftAmount; |
1180 | // according to the llvm documentation, if orgShiftAmount > valueWidth, |
1181 | // the result is undfeined. but we do shift by this rule: |
1182 | return (NextPowerOf2(A: valueWidth-1) - 1) & orgShiftAmount; |
1183 | } |
1184 | |
1185 | |
1186 | void Interpreter::visitShl(BinaryOperator &I) { |
1187 | ExecutionContext &SF = ECStack.back(); |
1188 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
1189 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
1190 | GenericValue Dest; |
1191 | Type *Ty = I.getType(); |
1192 | |
1193 | if (Ty->isVectorTy()) { |
1194 | uint32_t src1Size = uint32_t(Src1.AggregateVal.size()); |
1195 | assert(src1Size == Src2.AggregateVal.size()); |
1196 | for (unsigned i = 0; i < src1Size; i++) { |
1197 | GenericValue Result; |
1198 | uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue(); |
1199 | llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal; |
1200 | Result.IntVal = valueToShift.shl(shiftAmt: getShiftAmount(orgShiftAmount: shiftAmount, valueToShift)); |
1201 | Dest.AggregateVal.push_back(x: Result); |
1202 | } |
1203 | } else { |
1204 | // scalar |
1205 | uint64_t shiftAmount = Src2.IntVal.getZExtValue(); |
1206 | llvm::APInt valueToShift = Src1.IntVal; |
1207 | Dest.IntVal = valueToShift.shl(shiftAmt: getShiftAmount(orgShiftAmount: shiftAmount, valueToShift)); |
1208 | } |
1209 | |
1210 | SetValue(V: &I, Val: Dest, SF); |
1211 | } |
1212 | |
1213 | void Interpreter::visitLShr(BinaryOperator &I) { |
1214 | ExecutionContext &SF = ECStack.back(); |
1215 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
1216 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
1217 | GenericValue Dest; |
1218 | Type *Ty = I.getType(); |
1219 | |
1220 | if (Ty->isVectorTy()) { |
1221 | uint32_t src1Size = uint32_t(Src1.AggregateVal.size()); |
1222 | assert(src1Size == Src2.AggregateVal.size()); |
1223 | for (unsigned i = 0; i < src1Size; i++) { |
1224 | GenericValue Result; |
1225 | uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue(); |
1226 | llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal; |
1227 | Result.IntVal = valueToShift.lshr(shiftAmt: getShiftAmount(orgShiftAmount: shiftAmount, valueToShift)); |
1228 | Dest.AggregateVal.push_back(x: Result); |
1229 | } |
1230 | } else { |
1231 | // scalar |
1232 | uint64_t shiftAmount = Src2.IntVal.getZExtValue(); |
1233 | llvm::APInt valueToShift = Src1.IntVal; |
1234 | Dest.IntVal = valueToShift.lshr(shiftAmt: getShiftAmount(orgShiftAmount: shiftAmount, valueToShift)); |
1235 | } |
1236 | |
1237 | SetValue(V: &I, Val: Dest, SF); |
1238 | } |
1239 | |
1240 | void Interpreter::visitAShr(BinaryOperator &I) { |
1241 | ExecutionContext &SF = ECStack.back(); |
1242 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
1243 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
1244 | GenericValue Dest; |
1245 | Type *Ty = I.getType(); |
1246 | |
1247 | if (Ty->isVectorTy()) { |
1248 | size_t src1Size = Src1.AggregateVal.size(); |
1249 | assert(src1Size == Src2.AggregateVal.size()); |
1250 | for (unsigned i = 0; i < src1Size; i++) { |
1251 | GenericValue Result; |
1252 | uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue(); |
1253 | llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal; |
1254 | Result.IntVal = valueToShift.ashr(ShiftAmt: getShiftAmount(orgShiftAmount: shiftAmount, valueToShift)); |
1255 | Dest.AggregateVal.push_back(x: Result); |
1256 | } |
1257 | } else { |
1258 | // scalar |
1259 | uint64_t shiftAmount = Src2.IntVal.getZExtValue(); |
1260 | llvm::APInt valueToShift = Src1.IntVal; |
1261 | Dest.IntVal = valueToShift.ashr(ShiftAmt: getShiftAmount(orgShiftAmount: shiftAmount, valueToShift)); |
1262 | } |
1263 | |
1264 | SetValue(V: &I, Val: Dest, SF); |
1265 | } |
1266 | |
1267 | GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy, |
1268 | ExecutionContext &SF) { |
1269 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1270 | Type *SrcTy = SrcVal->getType(); |
1271 | if (SrcTy->isVectorTy()) { |
1272 | Type *DstVecTy = DstTy->getScalarType(); |
1273 | unsigned DBitWidth = cast<IntegerType>(Val: DstVecTy)->getBitWidth(); |
1274 | unsigned NumElts = Src.AggregateVal.size(); |
1275 | // the sizes of src and dst vectors must be equal |
1276 | Dest.AggregateVal.resize(new_size: NumElts); |
1277 | for (unsigned i = 0; i < NumElts; i++) |
1278 | Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(width: DBitWidth); |
1279 | } else { |
1280 | IntegerType *DITy = cast<IntegerType>(Val: DstTy); |
1281 | unsigned DBitWidth = DITy->getBitWidth(); |
1282 | Dest.IntVal = Src.IntVal.trunc(width: DBitWidth); |
1283 | } |
1284 | return Dest; |
1285 | } |
1286 | |
1287 | GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy, |
1288 | ExecutionContext &SF) { |
1289 | Type *SrcTy = SrcVal->getType(); |
1290 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1291 | if (SrcTy->isVectorTy()) { |
1292 | Type *DstVecTy = DstTy->getScalarType(); |
1293 | unsigned DBitWidth = cast<IntegerType>(Val: DstVecTy)->getBitWidth(); |
1294 | unsigned size = Src.AggregateVal.size(); |
1295 | // the sizes of src and dst vectors must be equal. |
1296 | Dest.AggregateVal.resize(new_size: size); |
1297 | for (unsigned i = 0; i < size; i++) |
1298 | Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(width: DBitWidth); |
1299 | } else { |
1300 | auto *DITy = cast<IntegerType>(Val: DstTy); |
1301 | unsigned DBitWidth = DITy->getBitWidth(); |
1302 | Dest.IntVal = Src.IntVal.sext(width: DBitWidth); |
1303 | } |
1304 | return Dest; |
1305 | } |
1306 | |
1307 | GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy, |
1308 | ExecutionContext &SF) { |
1309 | Type *SrcTy = SrcVal->getType(); |
1310 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1311 | if (SrcTy->isVectorTy()) { |
1312 | Type *DstVecTy = DstTy->getScalarType(); |
1313 | unsigned DBitWidth = cast<IntegerType>(Val: DstVecTy)->getBitWidth(); |
1314 | |
1315 | unsigned size = Src.AggregateVal.size(); |
1316 | // the sizes of src and dst vectors must be equal. |
1317 | Dest.AggregateVal.resize(new_size: size); |
1318 | for (unsigned i = 0; i < size; i++) |
1319 | Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(width: DBitWidth); |
1320 | } else { |
1321 | auto *DITy = cast<IntegerType>(Val: DstTy); |
1322 | unsigned DBitWidth = DITy->getBitWidth(); |
1323 | Dest.IntVal = Src.IntVal.zext(width: DBitWidth); |
1324 | } |
1325 | return Dest; |
1326 | } |
1327 | |
1328 | GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy, |
1329 | ExecutionContext &SF) { |
1330 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1331 | |
1332 | if (isa<VectorType>(Val: SrcVal->getType())) { |
1333 | assert(SrcVal->getType()->getScalarType()->isDoubleTy() && |
1334 | DstTy->getScalarType()->isFloatTy() && |
1335 | "Invalid FPTrunc instruction" ); |
1336 | |
1337 | unsigned size = Src.AggregateVal.size(); |
1338 | // the sizes of src and dst vectors must be equal. |
1339 | Dest.AggregateVal.resize(new_size: size); |
1340 | for (unsigned i = 0; i < size; i++) |
1341 | Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal; |
1342 | } else { |
1343 | assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() && |
1344 | "Invalid FPTrunc instruction" ); |
1345 | Dest.FloatVal = (float)Src.DoubleVal; |
1346 | } |
1347 | |
1348 | return Dest; |
1349 | } |
1350 | |
1351 | GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy, |
1352 | ExecutionContext &SF) { |
1353 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1354 | |
1355 | if (isa<VectorType>(Val: SrcVal->getType())) { |
1356 | assert(SrcVal->getType()->getScalarType()->isFloatTy() && |
1357 | DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction" ); |
1358 | |
1359 | unsigned size = Src.AggregateVal.size(); |
1360 | // the sizes of src and dst vectors must be equal. |
1361 | Dest.AggregateVal.resize(new_size: size); |
1362 | for (unsigned i = 0; i < size; i++) |
1363 | Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal; |
1364 | } else { |
1365 | assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() && |
1366 | "Invalid FPExt instruction" ); |
1367 | Dest.DoubleVal = (double)Src.FloatVal; |
1368 | } |
1369 | |
1370 | return Dest; |
1371 | } |
1372 | |
/// Interpret 'fptoui': round a float/double (or vector thereof) to an
/// unsigned integer of the destination bit width.
GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(V: SrcVal, SF);

  if (isa<VectorType>(Val: SrcTy)) {
    Type *DstVecTy = DstTy->getScalarType();
    Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(Val: DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(new_size: size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      // NOTE(review): this assert is trivially true inside the FloatTyID
      // branch, and the double branch below has no corresponding check.
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction" );
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Float: Src.AggregateVal[i].FloatVal, width: DBitWidth);
    } else {
      // Non-float element types are read as doubles here.
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Double: Src.AggregateVal[i].DoubleVal, width: DBitWidth);
    }
  } else {
    // scalar
    uint32_t DBitWidth = cast<IntegerType>(Val: DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction" );

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Float: Src.FloatVal, width: DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Double: Src.DoubleVal, width: DBitWidth);
    }
  }

  return Dest;
}
1410 | |
/// Interpret 'fptosi': round a float/double (or vector thereof) to a signed
/// integer of the destination bit width.
GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(V: SrcVal, SF);

  if (isa<VectorType>(Val: SrcTy)) {
    Type *DstVecTy = DstTy->getScalarType();
    Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(Val: DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(new_size: size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      // NOTE(review): this assert is trivially true inside the FloatTyID
      // branch, and the double branch below has no corresponding check.
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction" );
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Float: Src.AggregateVal[i].FloatVal, width: DBitWidth);
    } else {
      // Non-float element types are read as doubles here.
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Double: Src.AggregateVal[i].DoubleVal, width: DBitWidth);
    }
  } else {
    // scalar
    unsigned DBitWidth = cast<IntegerType>(Val: DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction" );

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Float: Src.FloatVal, width: DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Double: Src.DoubleVal, width: DBitWidth);
    }
  }
  return Dest;
}
1447 | |
1448 | GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy, |
1449 | ExecutionContext &SF) { |
1450 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1451 | |
1452 | if (isa<VectorType>(Val: SrcVal->getType())) { |
1453 | Type *DstVecTy = DstTy->getScalarType(); |
1454 | unsigned size = Src.AggregateVal.size(); |
1455 | // the sizes of src and dst vectors must be equal |
1456 | Dest.AggregateVal.resize(new_size: size); |
1457 | |
1458 | if (DstVecTy->getTypeID() == Type::FloatTyID) { |
1459 | assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction" ); |
1460 | for (unsigned i = 0; i < size; i++) |
1461 | Dest.AggregateVal[i].FloatVal = |
1462 | APIntOps::RoundAPIntToFloat(APIVal: Src.AggregateVal[i].IntVal); |
1463 | } else { |
1464 | for (unsigned i = 0; i < size; i++) |
1465 | Dest.AggregateVal[i].DoubleVal = |
1466 | APIntOps::RoundAPIntToDouble(APIVal: Src.AggregateVal[i].IntVal); |
1467 | } |
1468 | } else { |
1469 | // scalar |
1470 | assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction" ); |
1471 | if (DstTy->getTypeID() == Type::FloatTyID) |
1472 | Dest.FloatVal = APIntOps::RoundAPIntToFloat(APIVal: Src.IntVal); |
1473 | else { |
1474 | Dest.DoubleVal = APIntOps::RoundAPIntToDouble(APIVal: Src.IntVal); |
1475 | } |
1476 | } |
1477 | return Dest; |
1478 | } |
1479 | |
1480 | GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy, |
1481 | ExecutionContext &SF) { |
1482 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1483 | |
1484 | if (isa<VectorType>(Val: SrcVal->getType())) { |
1485 | Type *DstVecTy = DstTy->getScalarType(); |
1486 | unsigned size = Src.AggregateVal.size(); |
1487 | // the sizes of src and dst vectors must be equal |
1488 | Dest.AggregateVal.resize(new_size: size); |
1489 | |
1490 | if (DstVecTy->getTypeID() == Type::FloatTyID) { |
1491 | assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction" ); |
1492 | for (unsigned i = 0; i < size; i++) |
1493 | Dest.AggregateVal[i].FloatVal = |
1494 | APIntOps::RoundSignedAPIntToFloat(APIVal: Src.AggregateVal[i].IntVal); |
1495 | } else { |
1496 | for (unsigned i = 0; i < size; i++) |
1497 | Dest.AggregateVal[i].DoubleVal = |
1498 | APIntOps::RoundSignedAPIntToDouble(APIVal: Src.AggregateVal[i].IntVal); |
1499 | } |
1500 | } else { |
1501 | // scalar |
1502 | assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction" ); |
1503 | |
1504 | if (DstTy->getTypeID() == Type::FloatTyID) |
1505 | Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(APIVal: Src.IntVal); |
1506 | else { |
1507 | Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(APIVal: Src.IntVal); |
1508 | } |
1509 | } |
1510 | |
1511 | return Dest; |
1512 | } |
1513 | |
1514 | GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy, |
1515 | ExecutionContext &SF) { |
1516 | uint32_t DBitWidth = cast<IntegerType>(Val: DstTy)->getBitWidth(); |
1517 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1518 | assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction" ); |
1519 | |
1520 | Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal); |
1521 | return Dest; |
1522 | } |
1523 | |
1524 | GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy, |
1525 | ExecutionContext &SF) { |
1526 | GenericValue Dest, Src = getOperandValue(V: SrcVal, SF); |
1527 | assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction" ); |
1528 | |
1529 | uint32_t PtrSize = getDataLayout().getPointerSizeInBits(); |
1530 | if (PtrSize != Src.IntVal.getBitWidth()) |
1531 | Src.IntVal = Src.IntVal.zextOrTrunc(width: PtrSize); |
1532 | |
1533 | Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue())); |
1534 | return Dest; |
1535 | } |
1536 | |
/// Simulate a 'bitcast': reinterpret the operand's bits as the destination
/// type without changing them. Scalar<->scalar casts copy the right union
/// member; any cast involving a vector goes through a generic bit-repacking
/// path that first widens/narrows to an integer vector.
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {

  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size)
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(V: SrcVal, SF);

  if (isa<VectorType>(Val: SrcTy) || isa<VectorType>(Val: DstTy)) {
    // vector src bitcast to vector dst or vector src bitcast to scalar dst or
    // scalar src bitcast to vector dst
    bool isLittleEndian = getDataLayout().isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    Type *SrcElemTy;
    Type *DstElemTy;
    unsigned SrcBitSize;
    unsigned DstBitSize;
    unsigned SrcNum;
    unsigned DstNum;

    if (isa<VectorType>(Val: SrcTy)) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // if src is scalar value, make it vector <1 x type>
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(x: Src);
    }

    if (isa<VectorType>(Val: DstTy)) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      // Element count follows from equal total bit width of src and dst.
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }

    // A bitcast never changes the total number of bits.
    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast" );

    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(new_size: SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(V: SrcVec.AggregateVal[i].FloatVal);

    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(V: SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of vector.
      llvm_unreachable("Invalid Bitcast" );
    }

    // now TempSrc is integer type vector
    if (DstNum < SrcNum) {
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      // Merge Ratio consecutive source elements into one wider dst element.
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(width: DstBitSize);
        // On big-endian targets the first source element lands in the most
        // significant position, so start at the top and shift downward.
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(width: SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(width: DstBitSize);
          Tmp <<= ShiftAmt;
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(x: Elt);
      }
    } else {
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      // Split each source element into Ratio narrower dst elements.
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          Elt.IntVal = Elt.IntVal.zext(width: SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal.lshrInPlace(ShiftAmt);
          // it could be DstBitSize == SrcBitSize, so check it
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(width: DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(x: Elt);
        }
      }
    }

    // convert result from integer to specified type
    if (isa<VectorType>(Val: DstTy)) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(new_size: DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(new_size: DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        // Integer destination elements: the repacked vector is the result.
        Dest = TempDst;
      }
    } else {
      // Scalar destination: the repacked vector has exactly one element.
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { //  if (isa<VectorType>(SrcTy)) || isa<VectorType>(DstTy))

    // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast" );
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(V: Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(V: Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast" );
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid Bitcast" );
    }
  }

  return Dest;
}
1701 | |
1702 | void Interpreter::visitTruncInst(TruncInst &I) { |
1703 | ExecutionContext &SF = ECStack.back(); |
1704 | SetValue(V: &I, Val: executeTruncInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1705 | } |
1706 | |
1707 | void Interpreter::visitSExtInst(SExtInst &I) { |
1708 | ExecutionContext &SF = ECStack.back(); |
1709 | SetValue(V: &I, Val: executeSExtInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1710 | } |
1711 | |
1712 | void Interpreter::visitZExtInst(ZExtInst &I) { |
1713 | ExecutionContext &SF = ECStack.back(); |
1714 | SetValue(V: &I, Val: executeZExtInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1715 | } |
1716 | |
1717 | void Interpreter::visitFPTruncInst(FPTruncInst &I) { |
1718 | ExecutionContext &SF = ECStack.back(); |
1719 | SetValue(V: &I, Val: executeFPTruncInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1720 | } |
1721 | |
1722 | void Interpreter::visitFPExtInst(FPExtInst &I) { |
1723 | ExecutionContext &SF = ECStack.back(); |
1724 | SetValue(V: &I, Val: executeFPExtInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1725 | } |
1726 | |
1727 | void Interpreter::visitUIToFPInst(UIToFPInst &I) { |
1728 | ExecutionContext &SF = ECStack.back(); |
1729 | SetValue(V: &I, Val: executeUIToFPInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1730 | } |
1731 | |
1732 | void Interpreter::visitSIToFPInst(SIToFPInst &I) { |
1733 | ExecutionContext &SF = ECStack.back(); |
1734 | SetValue(V: &I, Val: executeSIToFPInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1735 | } |
1736 | |
1737 | void Interpreter::visitFPToUIInst(FPToUIInst &I) { |
1738 | ExecutionContext &SF = ECStack.back(); |
1739 | SetValue(V: &I, Val: executeFPToUIInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1740 | } |
1741 | |
1742 | void Interpreter::visitFPToSIInst(FPToSIInst &I) { |
1743 | ExecutionContext &SF = ECStack.back(); |
1744 | SetValue(V: &I, Val: executeFPToSIInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1745 | } |
1746 | |
1747 | void Interpreter::visitPtrToIntInst(PtrToIntInst &I) { |
1748 | ExecutionContext &SF = ECStack.back(); |
1749 | SetValue(V: &I, Val: executePtrToIntInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1750 | } |
1751 | |
1752 | void Interpreter::visitIntToPtrInst(IntToPtrInst &I) { |
1753 | ExecutionContext &SF = ECStack.back(); |
1754 | SetValue(V: &I, Val: executeIntToPtrInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1755 | } |
1756 | |
1757 | void Interpreter::visitBitCastInst(BitCastInst &I) { |
1758 | ExecutionContext &SF = ECStack.back(); |
1759 | SetValue(V: &I, Val: executeBitCastInst(SrcVal: I.getOperand(i_nocapture: 0), DstTy: I.getType(), SF), SF); |
1760 | } |
1761 | |
// Copy the GenericValue union member matching the given first-class type.
#define IMPLEMENT_VAARG(TY) \
   case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

/// Simulate 'va_arg': fetch the next variadic argument of the requested type.
void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter.  LLI treats the valist as a
  // (ec-stack-depth var-arg-index) pair.
  GenericValue VAList = getOperandValue(V: I.getOperand(i_nocapture: 0), SF);
  GenericValue Dest;
  // Look up the argument in the VarArgs list of the frame that owns the
  // valist (which may not be the current frame).
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                         .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n" ;
    llvm_unreachable(nullptr);
  }

  // Set the Value of this Instruction.
  SetValue(V: &I, Val: Dest, SF);

  // Move the pointer to the next vararg.
  // NOTE(review): VAList is a local copy of the operand value, so this
  // increment does not appear to be observable by later va_arg uses —
  // confirm against how the valist value is produced/consumed.
  ++VAList.UIntPairVal.second;
}
1793 | |
1794 | void Interpreter::(ExtractElementInst &I) { |
1795 | ExecutionContext &SF = ECStack.back(); |
1796 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
1797 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
1798 | GenericValue Dest; |
1799 | |
1800 | Type *Ty = I.getType(); |
1801 | const unsigned indx = unsigned(Src2.IntVal.getZExtValue()); |
1802 | |
1803 | if(Src1.AggregateVal.size() > indx) { |
1804 | switch (Ty->getTypeID()) { |
1805 | default: |
1806 | dbgs() << "Unhandled destination type for extractelement instruction: " |
1807 | << *Ty << "\n" ; |
1808 | llvm_unreachable(nullptr); |
1809 | break; |
1810 | case Type::IntegerTyID: |
1811 | Dest.IntVal = Src1.AggregateVal[indx].IntVal; |
1812 | break; |
1813 | case Type::FloatTyID: |
1814 | Dest.FloatVal = Src1.AggregateVal[indx].FloatVal; |
1815 | break; |
1816 | case Type::DoubleTyID: |
1817 | Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal; |
1818 | break; |
1819 | } |
1820 | } else { |
1821 | dbgs() << "Invalid index in extractelement instruction\n" ; |
1822 | } |
1823 | |
1824 | SetValue(V: &I, Val: Dest, SF); |
1825 | } |
1826 | |
1827 | void Interpreter::visitInsertElementInst(InsertElementInst &I) { |
1828 | ExecutionContext &SF = ECStack.back(); |
1829 | VectorType *Ty = cast<VectorType>(Val: I.getType()); |
1830 | |
1831 | GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF); |
1832 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
1833 | GenericValue Src3 = getOperandValue(V: I.getOperand(i_nocapture: 2), SF); |
1834 | GenericValue Dest; |
1835 | |
1836 | Type *TyContained = Ty->getElementType(); |
1837 | |
1838 | const unsigned indx = unsigned(Src3.IntVal.getZExtValue()); |
1839 | Dest.AggregateVal = Src1.AggregateVal; |
1840 | |
1841 | if(Src1.AggregateVal.size() <= indx) |
1842 | llvm_unreachable("Invalid index in insertelement instruction" ); |
1843 | switch (TyContained->getTypeID()) { |
1844 | default: |
1845 | llvm_unreachable("Unhandled dest type for insertelement instruction" ); |
1846 | case Type::IntegerTyID: |
1847 | Dest.AggregateVal[indx].IntVal = Src2.IntVal; |
1848 | break; |
1849 | case Type::FloatTyID: |
1850 | Dest.AggregateVal[indx].FloatVal = Src2.FloatVal; |
1851 | break; |
1852 | case Type::DoubleTyID: |
1853 | Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal; |
1854 | break; |
1855 | } |
1856 | SetValue(V: &I, Val: Dest, SF); |
1857 | } |
1858 | |
/// Simulate 'shufflevector': build a result vector whose i-th element is
/// selected by the mask — indices < src1Size pick from the first operand,
/// indices in [src1Size, src1Size+src2Size) pick from the second.
void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
  ExecutionContext &SF = ECStack.back();

  VectorType *Ty = cast<VectorType>(Val: I.getType());

  GenericValue Src1 = getOperandValue(V: I.getOperand(i_nocapture: 0), SF);
  GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF);
  GenericValue Dest;

  // There is no need to check types of src1 and src2, because the compiled
  // bytecode can't contain different types for src1 and src2 for a
  // shufflevector instruction.

  Type *TyContained = Ty->getElementType();
  unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  // The result length equals the mask length, not the operand length.
  unsigned src3Size = I.getShuffleMask().size();

  Dest.AggregateVal.resize(new_size: src3Size);

  switch (TyContained->getTypeID()) {
  default:
    llvm_unreachable("Unhandled dest type for insertelement instruction" );
    break;
  case Type::IntegerTyID:
    for( unsigned i=0; i<src3Size; i++) {
      // NOTE(review): std::max with 0 clamps negative mask values
      // (presumably the -1 "undef element" sentinel) to element 0 — confirm
      // against ShuffleVectorInst::getMaskValue's contract.
      unsigned j = std::max(a: 0, b: I.getMaskValue(Elt: i));
      if(j < src1Size)
        Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
      else if(j < src1Size + src2Size)
        Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
      else
        // The selector may not be greater than sum of lengths of first and
        // second operands and llasm should not allow situation like
        // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
        //                      <2 x i32> < i32 0, i32 5 >,
        // where i32 5 is invalid, but let it be additional check here:
        llvm_unreachable("Invalid mask in shufflevector instruction" );
    }
    break;
  case Type::FloatTyID:
    for( unsigned i=0; i<src3Size; i++) {
      unsigned j = std::max(a: 0, b: I.getMaskValue(Elt: i));
      if(j < src1Size)
        Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
      else if(j < src1Size + src2Size)
        Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
      else
        llvm_unreachable("Invalid mask in shufflevector instruction" );
    }
    break;
  case Type::DoubleTyID:
    for( unsigned i=0; i<src3Size; i++) {
      unsigned j = std::max(a: 0, b: I.getMaskValue(Elt: i));
      if(j < src1Size)
        Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
      else if(j < src1Size + src2Size)
        Dest.AggregateVal[i].DoubleVal =
            Src2.AggregateVal[j-src1Size].DoubleVal;
      else
        llvm_unreachable("Invalid mask in shufflevector instruction" );
    }
    break;
  }
  SetValue(V: &I, Val: Dest, SF);
}
1925 | |
1926 | void Interpreter::(ExtractValueInst &I) { |
1927 | ExecutionContext &SF = ECStack.back(); |
1928 | Value *Agg = I.getAggregateOperand(); |
1929 | GenericValue Dest; |
1930 | GenericValue Src = getOperandValue(V: Agg, SF); |
1931 | |
1932 | ExtractValueInst::idx_iterator IdxBegin = I.idx_begin(); |
1933 | unsigned Num = I.getNumIndices(); |
1934 | GenericValue *pSrc = &Src; |
1935 | |
1936 | for (unsigned i = 0 ; i < Num; ++i) { |
1937 | pSrc = &pSrc->AggregateVal[*IdxBegin]; |
1938 | ++IdxBegin; |
1939 | } |
1940 | |
1941 | Type *IndexedType = ExtractValueInst::getIndexedType(Agg: Agg->getType(), Idxs: I.getIndices()); |
1942 | switch (IndexedType->getTypeID()) { |
1943 | default: |
1944 | llvm_unreachable("Unhandled dest type for extractelement instruction" ); |
1945 | break; |
1946 | case Type::IntegerTyID: |
1947 | Dest.IntVal = pSrc->IntVal; |
1948 | break; |
1949 | case Type::FloatTyID: |
1950 | Dest.FloatVal = pSrc->FloatVal; |
1951 | break; |
1952 | case Type::DoubleTyID: |
1953 | Dest.DoubleVal = pSrc->DoubleVal; |
1954 | break; |
1955 | case Type::ArrayTyID: |
1956 | case Type::StructTyID: |
1957 | case Type::FixedVectorTyID: |
1958 | case Type::ScalableVectorTyID: |
1959 | Dest.AggregateVal = pSrc->AggregateVal; |
1960 | break; |
1961 | case Type::PointerTyID: |
1962 | Dest.PointerVal = pSrc->PointerVal; |
1963 | break; |
1964 | } |
1965 | |
1966 | SetValue(V: &I, Val: Dest, SF); |
1967 | } |
1968 | |
1969 | void Interpreter::visitInsertValueInst(InsertValueInst &I) { |
1970 | |
1971 | ExecutionContext &SF = ECStack.back(); |
1972 | Value *Agg = I.getAggregateOperand(); |
1973 | |
1974 | GenericValue Src1 = getOperandValue(V: Agg, SF); |
1975 | GenericValue Src2 = getOperandValue(V: I.getOperand(i_nocapture: 1), SF); |
1976 | GenericValue Dest = Src1; // Dest is a slightly changed Src1 |
1977 | |
1978 | ExtractValueInst::idx_iterator IdxBegin = I.idx_begin(); |
1979 | unsigned Num = I.getNumIndices(); |
1980 | |
1981 | GenericValue *pDest = &Dest; |
1982 | for (unsigned i = 0 ; i < Num; ++i) { |
1983 | pDest = &pDest->AggregateVal[*IdxBegin]; |
1984 | ++IdxBegin; |
1985 | } |
1986 | // pDest points to the target value in the Dest now |
1987 | |
1988 | Type *IndexedType = ExtractValueInst::getIndexedType(Agg: Agg->getType(), Idxs: I.getIndices()); |
1989 | |
1990 | switch (IndexedType->getTypeID()) { |
1991 | default: |
1992 | llvm_unreachable("Unhandled dest type for insertelement instruction" ); |
1993 | break; |
1994 | case Type::IntegerTyID: |
1995 | pDest->IntVal = Src2.IntVal; |
1996 | break; |
1997 | case Type::FloatTyID: |
1998 | pDest->FloatVal = Src2.FloatVal; |
1999 | break; |
2000 | case Type::DoubleTyID: |
2001 | pDest->DoubleVal = Src2.DoubleVal; |
2002 | break; |
2003 | case Type::ArrayTyID: |
2004 | case Type::StructTyID: |
2005 | case Type::FixedVectorTyID: |
2006 | case Type::ScalableVectorTyID: |
2007 | pDest->AggregateVal = Src2.AggregateVal; |
2008 | break; |
2009 | case Type::PointerTyID: |
2010 | pDest->PointerVal = Src2.PointerVal; |
2011 | break; |
2012 | } |
2013 | |
2014 | SetValue(V: &I, Val: Dest, SF); |
2015 | } |
2016 | |
/// Evaluate a ConstantExpr by re-using the interpreter's instruction
/// executors: cast-like and compare/select/GEP opcodes are forwarded
/// directly; the remaining (binary) opcodes are computed inline below.
GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
                                                ExecutionContext &SF) {
  switch (CE->getOpcode()) {
  case Instruction::Trunc:
    return executeTruncInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::ZExt:
    return executeZExtInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::SExt:
    return executeSExtInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::FPTrunc:
    return executeFPTruncInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::FPExt:
    return executeFPExtInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::UIToFP:
    return executeUIToFPInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::SIToFP:
    return executeSIToFPInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::FPToUI:
    return executeFPToUIInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::FPToSI:
    return executeFPToSIInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::PtrToInt:
    return executePtrToIntInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::IntToPtr:
    return executeIntToPtrInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::BitCast:
    return executeBitCastInst(SrcVal: CE->getOperand(i_nocapture: 0), DstTy: CE->getType(), SF);
  case Instruction::GetElementPtr:
    return executeGEPOperation(Ptr: CE->getOperand(i_nocapture: 0), I: gep_type_begin(GEP: CE),
                               E: gep_type_end(GEP: CE), SF);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return executeCmpInst(predicate: CE->getPredicate(),
                          Src1: getOperandValue(V: CE->getOperand(i_nocapture: 0), SF),
                          Src2: getOperandValue(V: CE->getOperand(i_nocapture: 1), SF),
                          Ty: CE->getOperand(i_nocapture: 0)->getType());
  case Instruction::Select:
    return executeSelectInst(Src1: getOperandValue(V: CE->getOperand(i_nocapture: 0), SF),
                             Src2: getOperandValue(V: CE->getOperand(i_nocapture: 1), SF),
                             Src3: getOperandValue(V: CE->getOperand(i_nocapture: 2), SF),
                             Ty: CE->getOperand(i_nocapture: 0)->getType());
  default :
    // Fall through to the binary-operator handling below.
    break;
  }

  // The cases below here require a GenericValue parameter for the result
  // so we initialize one, compute it and then return it.
  GenericValue Op0 = getOperandValue(V: CE->getOperand(i_nocapture: 0), SF);
  GenericValue Op1 = getOperandValue(V: CE->getOperand(i_nocapture: 1), SF);
  GenericValue Dest;
  // Operand type, needed by the floating-point executors to pick
  // float vs. double handling.
  Type * Ty = CE->getOperand(i_nocapture: 0)->getType();
  switch (CE->getOpcode()) {
  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
  case Instruction::FAdd: executeFAddInst(Dest, Src1: Op0, Src2: Op1, Ty); break;
  case Instruction::FSub: executeFSubInst(Dest, Src1: Op0, Src2: Op1, Ty); break;
  case Instruction::FMul: executeFMulInst(Dest, Src1: Op0, Src2: Op1, Ty); break;
  case Instruction::FDiv: executeFDivInst(Dest, Src1: Op0, Src2: Op1, Ty); break;
  case Instruction::FRem: executeFRemInst(Dest, Src1: Op0, Src2: Op1, Ty); break;
  case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(RHS: Op1.IntVal); break;
  case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(RHS: Op1.IntVal); break;
  case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(RHS: Op1.IntVal); break;
  case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(RHS: Op1.IntVal); break;
  case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
  case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  case Instruction::Shl:
    Dest.IntVal = Op0.IntVal.shl(shiftAmt: Op1.IntVal.getZExtValue());
    break;
  case Instruction::LShr:
    Dest.IntVal = Op0.IntVal.lshr(shiftAmt: Op1.IntVal.getZExtValue());
    break;
  case Instruction::AShr:
    Dest.IntVal = Op0.IntVal.ashr(ShiftAmt: Op1.IntVal.getZExtValue());
    break;
  default:
    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n" ;
    llvm_unreachable("Unhandled ConstantExpr" );
  }
  return Dest;
}
2099 | |
2100 | GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) { |
2101 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: V)) { |
2102 | return getConstantExprValue(CE, SF); |
2103 | } else if (Constant *CPV = dyn_cast<Constant>(Val: V)) { |
2104 | return getConstantValue(C: CPV); |
2105 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Val: V)) { |
2106 | return PTOGV(P: getPointerToGlobal(GV)); |
2107 | } else { |
2108 | return SF.Values[V]; |
2109 | } |
2110 | } |
2111 | |
2112 | //===----------------------------------------------------------------------===// |
2113 | // Dispatch and Execution Code |
2114 | //===----------------------------------------------------------------------===// |
2115 | |
2116 | //===----------------------------------------------------------------------===// |
2117 | // callFunction - Execute the specified function... |
2118 | // |
/// Push a new stack frame for F and bind its arguments; external
/// (declaration-only) functions are executed immediately via the FFI path
/// and their frame is popped again before returning.
void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
  assert((ECStack.empty() || !ECStack.back().Caller ||
          ECStack.back().Caller->arg_size() == ArgVals.size()) &&
         "Incorrect number of arguments passed into function call!" );
  // Make a new stack frame... and fill it in.
  ECStack.emplace_back();
  ExecutionContext &StackFrame = ECStack.back();
  StackFrame.CurFunction = F;

  // Special handling for external functions.
  if (F->isDeclaration()) {
    GenericValue Result = callExternalFunction (F, ArgVals);
    // Simulate a 'ret' instruction of the appropriate type.
    popStackAndReturnValueToCaller (RetTy: F->getReturnType (), Result);
    return;
  }

  // Get pointers to first LLVM BB & Instruction in function.
  StackFrame.CurBB   = &F->front();
  StackFrame.CurInst = StackFrame.CurBB->begin();

  // Run through the function arguments and initialize their values...
  assert((ArgVals.size() == F->arg_size() ||
         (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
         "Invalid number of values passed to function invocation!" );

  // Handle non-varargs arguments...
  unsigned i = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI, ++i)
    SetValue(V: &*AI, Val: ArgVals[i], SF&: StackFrame);

  // Handle varargs arguments...
  // Any values beyond the formal parameter count become the frame's
  // VarArgs list, consumed later by visitVAArgInst.
  StackFrame.VarArgs.assign(first: ArgVals.begin()+i, last: ArgVals.end());
}
2154 | |
2155 | |
2156 | void Interpreter::run() { |
2157 | while (!ECStack.empty()) { |
2158 | // Interpret a single instruction & increment the "PC". |
2159 | ExecutionContext &SF = ECStack.back(); // Current stack frame |
2160 | Instruction &I = *SF.CurInst++; // Increment before execute |
2161 | |
2162 | // Track the number of dynamic instructions executed. |
2163 | ++NumDynamicInsts; |
2164 | |
2165 | LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n" ); |
2166 | visit(I); // Dispatch to one of the visit* methods... |
2167 | } |
2168 | } |
2169 | |