/*
 * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include "Options.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/SharedTask.h>
#include <wtf/WeakRandom.h>

#if ENABLE(ASSEMBLER)

namespace JSC {

inline bool isARMv7IDIVSupported()
{
#if HAVE(ARM_IDIV_INSTRUCTIONS)
    return true;
#else
    return false;
#endif
}

inline bool isARM64()
{
#if CPU(ARM64)
    return true;
#else
    return false;
#endif
}

inline bool isMIPS()
{
#if CPU(MIPS)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}

inline bool isX86_64()
{
#if CPU(X86_64)
    return true;
#else
    return false;
#endif
}

inline bool optimizeForARMv7IDIVSupported()
{
    return isARMv7IDIVSupported() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForARM64()
{
    return isARM64() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForMIPS()
{
    return isMIPS() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForX86()
{
    return isX86() && Options::useArchitectureSpecificOptimizations();
}

inline bool optimizeForX86_64()
{
    return isX86_64() && Options::useArchitectureSpecificOptimizations();
}

class AllowMacroScratchRegisterUsage;
class DisallowMacroScratchRegisterUsage;
class LinkBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType, class MacroAssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); }

    static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
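
    // The Scale values name the power-of-two multipliers (x1, x2, x4, x8)
    // applied to an index register in scaled addressing; timesPtr() picks the
    // scale matching sizeof(void*). An illustrative use, with hypothetical
    // register names (BaseIndex is defined below):
    //
    //     loadPtr(BaseIndex(tableReg, indexReg, timesPtr()), destReg);
    //
    // loads the indexReg'th pointer-sized entry of the table at tableReg.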

    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        RegisterID base;
        int32_t offset;
    };
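
    // For example (hypothetical registers), Address(objectReg, 8) names the
    // memory at objectReg + 8, so
    //
    //     load32(Address(objectReg, 8), destReg);
    //
    // loads the 32-bit word 8 bytes past objectReg into destReg.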

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent, and the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;

        BaseIndex withOffset(int32_t additionalOffset)
        {
            return BaseIndex(base, index, scale, offset + additionalOffset);
        }
    };
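
    // The effective address denoted by a BaseIndex is
    //
    //     base + (index << scale) + offset
    //
    // so, for example (hypothetical registers), element i of an int32_t array
    // can be loaded with:
    //
    //     load32(BaseIndex(arrayReg, iReg, TimesFour), resultReg);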

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* is used instead of this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };

    // TrustedImmPtr:
    //
    // A pointer-sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // it from pointers used as absolute addresses for memory operations.
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };
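
    // Illustrative contrast with AbsoluteAddress (operations as defined by the
    // concrete MacroAssemblers; names hypothetical): the first line moves the
    // pointer value itself into a register; the second operates on the memory
    // it points at.
    //
    //     move(TrustedImmPtr(&counter), destReg);
    //     add32(TrustedImm32(1), AbsoluteAddress(&counter));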

    struct ImmPtr : private TrustedImmPtr
    {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };

    // TrustedImm32:
    //
    // A 32-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };
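
    // Illustrative use (hypothetical register): explicit construction keeps a
    // RegisterID from silently converting to an immediate, so
    //
    //     add32(TrustedImm32(1), counterReg);
    //
    // is well-formed while add32(1, counterReg) is not. The Imm* wrappers
    // (ImmPtr, Imm32, Imm64) mark untrusted immediates - values that may be
    // influenced by the code being compiled, and that a concrete
    // MacroAssembler may therefore blind before planting them.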

    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };

    // TrustedImm64:
    //
    // A 64-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 : private TrustedImm64
    {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.
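
    // A typical forward-branch pattern looks like this (illustrative; the
    // branch and jump emitters are supplied by the concrete MacroAssembler
    // subclasses):
    //
    //     Jump slowCase = masm.branch32(MacroAssembler::Equal, valueReg, TrustedImm32(0));
    //     // ... fast path ...
    //     Jump done = masm.jump();
    //     slowCase.link(&masm);
    //     // ... slow path ...
    //     done.link(&masm);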

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class Jump;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool operator==(const Label& other) const { return m_label == other.m_label; }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            Tail = 0x4,
            LinkableNear = 0x3,
            LinkableNearTail = 0x7,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };
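
    // Illustrative use (names hypothetical): a near call is planted now and
    // bound to its target when the LinkBuffer is finalized.
    //
    //     Call call = masm.nearCall();
    //     ...
    //     linkBuffer.link(call, FunctionPtr(helperFunction));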

    // Jump:
    //
    // A Jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
#if CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    };

    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        friend class LinkBuffer;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            if (jump.isSet())
                append(jump);
        }

        void link(AbstractMacroAssemblerType* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
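
    // Illustrative use (branch emitters supplied by the concrete
    // MacroAssemblers): several guards funnel into one slow path.
    //
    //     JumpList slowCases;
    //     slowCases.append(masm.branchTest32(...));
    //     slowCases.append(masm.branch32(...));
    //     // ... fast path ...
    //     slowCases.link(&masm); // Every collected jump now targets this point.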

    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif

    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }

#if ENABLE(MASM_PROBE)

    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER

        static const char* gprName(RegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case RegisterID::_regName: \
                return #_regName;
            FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        static const char* fprName(FPRegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case FPRegisterID::_regName: \
                return #_regName;
            FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        void*& gpr(RegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case RegisterID::_regName: \
                return _regName;
            FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        double& fpr(FPRegisterID regID)
        {
            switch (regID) {
            #define DECLARE_REGISTER(_type, _regName) \
            case FPRegisterID::_regName: \
                return _regName;
            FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
            #undef DECLARE_REGISTER
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    };

    struct ProbeContext;
    typedef void (*ProbeFunction)(struct ProbeContext*);

    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        // Convenience methods:
        void*& gpr(RegisterID regID) { return cpu.gpr(regID); }
        double& fpr(FPRegisterID regID) { return cpu.fpr(regID); }
        const char* gprName(RegisterID regID) { return cpu.gprName(regID); }
        const char* fprName(FPRegisterID regID) { return cpu.fprName(regID); }
    };

    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user-supplied probe function, and restore the CPUState before
    // continuing with other JIT-generated code.
    //
    // The user-supplied probe function will be called with a single pointer to
    // a ProbeContext struct (defined above) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT-generated code.
    //
    // If the user probe function alters the register values in the ProbeContext,
    // the altered values will be loaded into the CPU registers when the probe
    // returns.
    //
    // The ProbeContext is stack-allocated and is only valid for the duration
    // of the call to the user probe function.
    //
    // Note: probe() should be implemented by the target-specific MacroAssembler.
    // This prototype is only provided here to document the interface.

    void probe(ProbeFunction, void* arg1, void* arg2);
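
    // A sketch of a probe (the probe function and its body are hypothetical;
    // dataLogF is WTF's printf-style logger):
    //
    //     static void reportStackPointer(ProbeContext* context)
    //     {
    //         dataLogF("sp = %p\n", context->gpr(MacroAssembler::stackPointerRegister));
    //     }
    //
    //     masm.probe(reportStackPointer, nullptr, nullptr);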

#endif // ENABLE(MASM_PROBE)

    AssemblerType m_assembler;

    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        switch (nearCall.callMode()) {
        case NearCallMode::Tail:
            AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation());
            return;
        case NearCallMode::Regular:
            AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }

    template<typename Functor>
    void addLinkTask(const Functor& functor)
    {
        m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor));
    }

protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }

    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssemblerType* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
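
    // Each cached temp register owns one bit of m_tempRegistersValidBits.
    // Taking a Label or linking a Jump calls invalidateAllTempRegisters(),
    // since control flow may merge in from code that left different values in
    // the temp registers. A sketch of the intended caching protocol (the
    // member name is hypothetical):
    //
    //     intptr_t cached;
    //     if (!m_cachedTempRegister.value(cached) || cached != wanted) {
    //         // ... materialize `wanted` into the register ...
    //         m_cachedTempRegister.setValue(wanted);
    //     }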

    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    friend class AllowMacroScratchRegisterUsage;
    friend class DisallowMacroScratchRegisterUsage;
    unsigned m_tempRegistersValidBits;
    bool m_allowScratchRegister { true };

    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;

    friend class LinkBuffer;
}; // class AbstractMacroAssembler

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h