/*
 * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
#include "MacroAssemblerARM64.h"
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif

namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:

    static constexpr RegisterID nextRegister(RegisterID reg)
    {
        return static_cast<RegisterID>(reg + 1);
    }

    static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
    {
        return static_cast<FPRegisterID>(reg + 1);
    }

    static constexpr unsigned numberOfRegisters()
    {
        return lastRegister() - firstRegister() + 1;
    }

    static constexpr unsigned registerIndex(RegisterID reg)
    {
        return reg - firstRegister();
    }

    static constexpr unsigned numberOfFPRegisters()
    {
        return lastFPRegister() - firstFPRegister() + 1;
    }

    static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
    {
        return reg - firstFPRegister();
    }

    static constexpr unsigned registerIndex(FPRegisterID reg)
    {
        return fpRegisterIndex(reg) + numberOfRegisters();
    }

    static constexpr unsigned totalNumberOfRegisters()
    {
        return numberOfRegisters() + numberOfFPRegisters();
    }
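
    // Editorial note (not part of the original header): the registerIndex()
    // overloads lay the GP and FP registers out in one flat index space, with the
    // FP registers following the GP registers. For example, on a port with 16 GP
    // registers, registerIndex(fpReg) == fpRegisterIndex(fpReg) + 16, and
    // totalNumberOfRegisters() is the size of that combined space.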

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::compare32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::mul32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
#endif
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;

    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == static_cast<int32_t>(value);
    }

    static const double twoToThe32; // This is super useful for some double code.

    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return DoubleEqual; // make compiler happy
    }

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
        case Signed:
        case PositiveOrZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        case Signed:
            return PositiveOrZero;
        case PositiveOrZero:
            return Signed;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }

    static RelationalCondition flip(RelationalCondition cond)
    {
        switch (cond) {
        case Equal:
        case NotEqual:
            return cond;
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return Equal;
    }
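
    // Illustrative example (not part of the original header): flip() exchanges the
    // roles of the two operands. "a < b" holds exactly when "b > a" holds, so for
    // any registers a and b the following two branches take the same path:
    //     branch32(LessThan, a, b);
    //     branch32(flip(LessThan), b, a); // flip(LessThan) == GreaterThan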

    // True if this:
    //     branch8(cond, value, value)
    // Is the same as this:
    //     branch32(cond, signExt8(value), signExt8(value))
    static bool isSigned(RelationalCondition cond)
    {
        switch (cond) {
        case Equal:
        case NotEqual:
        case GreaterThan:
        case GreaterThanOrEqual:
        case LessThan:
        case LessThanOrEqual:
            return true;
        default:
            return false;
        }
    }

    // True if this:
    //     branch8(cond, value, value)
    // Is the same as this:
    //     branch32(cond, zeroExt8(value), zeroExt8(value))
    static bool isUnsigned(RelationalCondition cond)
    {
        switch (cond) {
        case Equal:
        case NotEqual:
        case Above:
        case AboveOrEqual:
        case Below:
        case BelowOrEqual:
            return true;
        default:
            return false;
        }
    }
#endif

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }
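
    // Illustrative usage (not part of the original header; regT0/regT1 are
    // hypothetical register names): poke() and peek() address stack slots
    // relative to the stack pointer, so
    //     poke(regT0, 2); // store regT0 at [sp + 2 * sizeof(void*)]
    //     peek(regT1, 2); // reload the same slot into regT1
    // round-trips a value through the second slot above the stack pointer.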

#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        push(imm);
    }
    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }
    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)

#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif

#if CPU(MIPS)
    void poke(FPRegisterID src, int index = 0)
    {
        ASSERT(!(index & 1));
        storeDouble(src, addressForPoke(index));
    }
#endif

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
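
    // Illustrative example (not part of the original header): a requested shift of
    // 35 is masked to 35 & 31 == 3, so trustedImm32ForShift(Imm32(35)) yields the
    // same TrustedImm32 as trustedImm32ForShift(Imm32(3)).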

    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
    {
        compare32(commute(cond), right, left, dest);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
    }

#if !CPU(ARM_TRADITIONAL)
    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, address, imm));
    }
#endif
#endif

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition: returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }

    void oops()
    {
        abortWithReason(B3Oops);
    }

    // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return
    // consumes some register in some way.
    void ret32(RegisterID) { ret(); }
    void ret64(RegisterID) { ret(); }
    void retFloat(FPRegisterID) { ret(); }
    void retDouble(FPRegisterID) { ret(); }

    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }
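
    // Editorial note (not part of the original header): with BlindingModulus == 64,
    // random() & (BlindingModulus - 1) is zero roughly once in 64 calls, so
    // shouldConsiderBlinding() samples blinding rather than applying it to every
    // otherwise-eligible constant.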

    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
    {
        add32(left, right, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and32(TrustedImm32(imm), srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift32(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift32(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift32(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store32(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }

    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }

#else // !CPU(X86_64) && !CPU(ARM64)

    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
    {
        add64(left, right, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift64(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift64(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift64(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store64(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }

    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }
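
    // Illustrative examples (not part of the original header): 10.5 scales to 84
    // with no fractional remainder and is below 0xff, so it is not treated as a
    // blinding candidate, while a value such as 1234.75 passes the same fractional
    // checks but exceeds 0xff and therefore is.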

    bool shouldBlindPointerForSpecificArch(uintptr_t value)
    {
        if (sizeof(void*) == 4)
            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
    }

    bool shouldBlind(ImmPtr imm)
    {
        if (!canBlind())
            return false;

#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // In debug builds, always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindPointerForSpecificArch(value);
    }

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
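
    // Editorial sketch (not part of the original header): rotationBlindConstant()
    // stores rotl(value, rotation) together with the rotation amount, and
    // loadRotationBlindedConstant() undoes it, relying on the identity
    //     rotr(rotl(value, r), r) == value
    // so the raw pointer bits never appear directly as an instruction immediate.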

    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // In debug builds, always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif // !CPU(X86_64) && !CPU(ARM64)

#if ENABLE(B3_JIT)
    // We should implement this the right way eventually, but for now, it's fine because it arises so
    // infrequently.
    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        move(TrustedImm32(0), dest);
        Jump falseCase = branchDouble(invert(cond), left, right);
        move(TrustedImm32(1), dest);
        falseCase.link(this);
    }
    void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        move(TrustedImm32(0), dest);
        Jump falseCase = branchFloat(invert(cond), left, right);
        move(TrustedImm32(1), dest);
        falseCase.link(this);
    }
#endif

    void lea(Address address, RegisterID dest)
    {
        addPtr(TrustedImm32(address.offset), address.base, dest);
    }
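
    // Illustrative example (not part of the original header; base/dest are
    // hypothetical registers): lea() materializes an address without loading from it:
    //     lea(Address(base, 16), dest); // dest = base + 16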

    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // In debug builds, always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }
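
    // Illustrative example (not part of the original header): for a value such as
    // 0x1234 the mask becomes 0xffff, so the returned key is random() & 0xffff and
    // the blinded halves stay in the same magnitude range as the original constant.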

    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }

    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
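
    // Editorial sketch (not part of the original header): additionBlindedConstant()
    // splits baseValue into (baseValue - key, key), so adding the two halves in
    // sequence reproduces baseValue; maskTable keeps the key a multiple of 4 (or 2)
    // whenever baseValue is, so a blinded pointer offset stays aligned mid-sequence.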

    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }

    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
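
    // Editorial sketch (not part of the original header): the two halves recombine
    // under the corresponding operation, i.e. for andBlindedConstant()
    //     value1 & value2 == baseValue
    // and for orBlindedConstant()
    //     value1 | value2 == baseValue
    // so chaining the pair of and32()/or32() operations yields the original constant.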

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
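
    // Illustrative example (not part of the original header): xorBlindConstant()
    // returns (baseValue ^ key, key), and loadXorBlindedConstant() re-XORs them:
    //     (baseValue ^ key) ^ key == baseValue
    // so the constant is recovered in the destination register without ever being
    // emitted verbatim.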

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, src, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), src, dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src != dest || haveScratchRegisterForBlinding()) {
                if (src == dest) {
                    move(src, scratchRegisterForBlinding());
                    src = scratchRegisterForBlinding();
                }
                loadXorBlindedConstant(xorBlindConstant(imm), dest);
                mul32(src, dest);
                return;
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
        }
        mul32(imm.asTrustedImm32(), src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64) || CPU(ARM64)

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else // CPU(X86) || CPU(X86_64)
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
                store32(scratchRegisterForBlinding(), dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif // CPU(X86) || CPU(X86_64)
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
                return branch32(cond, left, scratchRegisterForBlinding());
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

    void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (shouldBlind(right)) {
            if (left != dest || haveScratchRegisterForBlinding()) {
                RegisterID blindedConstantReg = dest;
                if (left == dest)
                    blindedConstantReg = scratchRegisterForBlinding();
                loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
                compare32(cond, left, blindedConstantReg, dest);
                return;
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            compare32(cond, left, right.asTrustedImm32(), dest);
            return;
        }

        compare32(cond, left, right.asTrustedImm32(), dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, src, imm.asTrustedImm32(), dest);
    }

    // branchSub32 takes a scratch register because 32-bit platforms make use of this
    // with src == dest, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }

#if ENABLE(MASM_PROBE)
    using MacroAssemblerBase::probe;

    // Lets you print from your JIT generated code.
    // See comments in MacroAssemblerPrinter.h for examples of how to use this.
    template<typename... Arguments>
    void print(Arguments... args);

    void probe(std::function<void (ProbeContext*)>);
#endif
};

} // namespace JSC

namespace WTF {

class PrintStream;

void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);

} // namespace WTF

#else // ENABLE(ASSEMBLER)

namespace JSC {

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:

    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h