/*
 * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

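// Byte distance between the DataLabelPtr recorded by moveWithPatch() and the label of the
// indirect call/jump through r11 that follows it (i.e. the length of that call/jump
// instruction). Repatching code subtracts it from a call location to find the patchable pointer.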
#define REPATCH_OFFSET_CALL_R11 3

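// Returns true if the 64-bit value can be represented as a sign-extended 32-bit immediate.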
inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

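    // The AbsoluteAddress / void* operations below cannot encode a 64-bit absolute address as an
    // x86-64 memory operand (only the accumulator moffs forms can), so they first materialize the
    // address in the scratch register and then use a register-indirect access.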
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        add32(imm, Address(scratchRegister()));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        and32(imm, Address(scratchRegister()));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        add32(Address(scratchRegister()), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        or32(imm, Address(scratchRegister()));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        or32(reg, Address(scratchRegister()));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        sub32(imm, Address(scratchRegister()));
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

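    // When the accumulator (eax/rax) is the destination or source, the moffs form of MOV can take
    // the full 64-bit absolute address directly; other registers go through an explicit move of
    // the address into a register first. store32, load64 and store64 below use the same fast path.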
    void load32(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        m_assembler.addsd_mr(0, scratchRegister(), dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister());
        m_assembler.cvtsi2sd_rr(scratchRegister(), dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister());
        store32(imm, scratchRegister());
    }

    void store32(RegisterID source, void* address)
    {
        if (source == X86Registers::eax)
            m_assembler.movl_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister());
            store32(source, scratchRegister());
        }
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister());
        store8(imm, Address(scratchRegister()));
    }

    void store8(RegisterID reg, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister());
        store8(reg, Address(scratchRegister()));
    }

#if OS(WINDOWS)
    Call callWithSlowPathReturnType()
    {
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 the second, and r9 the third.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // It is assumed that the parameters are already shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.

        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // In addition, we need to allocate 16 bytes for the return value.
        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // The first parameter register should contain a pointer to the stack-allocated space for the return value.
        move(X86Registers::esp, X86Registers::ecx);
        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // Copy the return value into rax and rdx.
        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
        load64(Address(X86Registers::eax), X86Registers::eax);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }
#endif

    Call call()
    {
#if OS(WINDOWS)
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
        // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied.

        // Copy argument 5.
        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister());
        store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));

        // Copy argument 6.
        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister());
        store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);
#if OS(WINDOWS)
        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }

    // Address is a memory location containing the address to jump to
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        jump(Address(scratchRegister()));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister());
        add32(src, Address(scratchRegister()));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(RegisterID src, Address dest)
    {
        m_assembler.addq_rm(src, dest.offset, dest.base);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister());
        add64(Address(scratchRegister()), dest);
    }

    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(srcDest);
        else
            m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(dest);
        else {
            move(imm, scratchRegister());
            add64(scratchRegister(), dest);
        }
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        if (imm.m_value == 1)
            m_assembler.incq_m(address.offset, address.base);
        else
            m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister());
        add64(imm, Address(scratchRegister()));
    }

    void add64(RegisterID a, RegisterID b, RegisterID dest)
    {
        x86Lea64(BaseIndex(a, b, TimesOne), dest);
    }

    void x86Lea64(BaseIndex index, RegisterID dest)
    {
        if (!index.scale && !index.offset) {
            if (index.base == dest) {
                add64(index.index, dest);
                return;
            }
            if (index.index == dest) {
                add64(index.base, dest);
                return;
            }
        }
        m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void and64(TrustedImmPtr imm, RegisterID srcDest)
    {
        intptr_t intValue = imm.asIntptr();
        if (intValue <= std::numeric_limits<int32_t>::max()
            && intValue >= std::numeric_limits<int32_t>::min()) {
            and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest);
            return;
        }
        move(imm, scratchRegister());
        and64(scratchRegister(), srcDest);
    }

    void and64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2 && op1 != dest && op2 != dest)
            move(op1, dest);
        else if (op1 == dest)
            and64(op2, dest);
        else {
            move(op2, dest);
            and64(op1, dest);
        }
    }

    void countLeadingZeros64(RegisterID src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcntq_rr(src, dst);
            return;
        }
        m_assembler.bsrq_rr(src, dst);
        clz64AfterBsr(dst);
    }

    void countLeadingZeros64(Address src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcntq_mr(src.offset, src.base, dst);
            return;
        }
        m_assembler.bsrq_mr(src.offset, src.base, dst);
        clz64AfterBsr(dst);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void lshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.shlq_CLr(dest);
        else {
            ASSERT(src != dest);

            // Can only shift by ecx, so we do some swapping if we see anything else.
            // If dest is ecx, the swap moved its value into src, so shift that register instead.
            swap(src, X86Registers::ecx);
            m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void rshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.sarq_CLr(dest);
        else {
            ASSERT(src != dest);

            // Can only shift by ecx, so we do some swapping if we see anything else.
            // If dest is ecx, the swap moved its value into src, so shift that register instead.
            swap(src, X86Registers::ecx);
            m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrq_i8r(imm.m_value, dest);
    }

    void urshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.shrq_CLr(dest);
        else {
            ASSERT(src != dest);

            // Can only shift by ecx, so we do some swapping if we see anything else.
            // If dest is ecx, the swap moved its value into src, so shift that register instead.
            swap(src, X86Registers::ecx);
            m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.imulq_rr(src, dest);
    }

    void mul64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src2 == dest) {
            m_assembler.imulq_rr(src1, dest);
            return;
        }
        move(src1, dest);
        m_assembler.imulq_rr(src2, dest);
    }

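    // cqo sign-extends rax into rdx:rax, setting up the dividend for x86Div64() below, whose
    // signed idiv leaves the quotient in rax and the remainder in rdx.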
    void x86ConvertToQuadWord64()
    {
        m_assembler.cqo();
    }

    void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx)
    {
        ASSERT_UNUSED(rax, rax == X86Registers::eax);
        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
        x86ConvertToQuadWord64();
    }

    void x86Div64(RegisterID denominator)
    {
        m_assembler.idivq_r(denominator);
    }

    void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator)
    {
        ASSERT_UNUSED(rax, rax == X86Registers::eax);
        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
        x86Div64(denominator);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(TrustedImm64 imm, RegisterID srcDest)
    {
        if (imm.m_value <= std::numeric_limits<int32_t>::max()
            && imm.m_value >= std::numeric_limits<int32_t>::min()) {
            or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest);
            return;
        }
        move(imm, scratchRegister());
        or64(scratchRegister(), srcDest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.rorq_i8r(imm.m_value, srcDst);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else
            m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else {
            move(imm, scratchRegister());
            sub64(scratchRegister(), dest);
        }
    }

    void sub64(TrustedImm32 imm, Address address)
    {
        m_assembler.subq_im(imm.m_value, address.offset, address.base);
    }

    void sub64(Address src, RegisterID dest)
    {
        m_assembler.subq_mr(src.offset, src.base, dest);
    }

    void sub64(RegisterID src, Address dest)
    {
        m_assembler.subq_rm(src, dest.offset, dest.base);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor64(op2, dest);
        else {
            move(op2, dest);
            xor64(op1, dest);
        }
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void not64(RegisterID srcDest)
    {
        m_assembler.notq_r(srcDest);
    }

    void not64(Address dest)
    {
        m_assembler.notq_m(dest.offset, dest.base);
    }

    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister());
            store64(src, scratchRegister());
        }
    }

    void store64(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movq_i32m(imm.m_value, address.offset, address.base);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (CAN_SIGN_EXTEND_32_64(imm.m_value)) {
            store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address);
            return;
        }

        move(imm, scratchRegister());
        store64(scratchRegister(), address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister());
        m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void swap64(RegisterID src, RegisterID dest)
    {
        m_assembler.xchgq_rr(src, dest);
    }

    void swap64(RegisterID src, Address dest)
    {
        m_assembler.xchgq_rm(src, dest.offset, dest.base);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                test64(*resultCondition, left, left, dest);
                return;
            }
        }

        m_assembler.cmpq_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        if (cond == DoubleEqual) {
            if (left == right) {
                m_assembler.ucomisd_rr(right, left);
                m_assembler.setnp_r(dest);
                return;
            }

            // An unordered comparison must yield false, so preload the result before the
            // compare and only update it on the ordered path.
            move(TrustedImm32(0), dest);
            m_assembler.ucomisd_rr(right, left);
            Jump isUnordered(m_assembler.jp());
            m_assembler.sete_r(dest);
            isUnordered.link(this);
            return;
        }

        if (cond == DoubleNotEqualOrUnordered) {
            if (left == right) {
                m_assembler.ucomisd_rr(right, left);
                m_assembler.setp_r(dest);
                return;
            }

            // An unordered comparison must yield true, so preload the result before the
            // compare and only update it on the ordered path.
            move(TrustedImm32(1), dest);
            m_assembler.ucomisd_rr(right, left);
            Jump isUnordered(m_assembler.jp());
            m_assembler.setne_r(dest);
            isUnordered.link(this);
            return;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);
        m_assembler.setCC_r(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest64(*resultCondition, left, left);
        }
        m_assembler.cmpq_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister());
        return branch64(cond, Address(scratchRegister()), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpq_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
    {
        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
    {
        move(right, scratchRegister());
        return branchPtr(cond, left, scratchRegister());
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
    {
        move(mask, scratchRegister());
        return branchTest64(cond, reg, scratchRegister());
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister());
        return branchTest64(cond, scratchRegister(), mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd64(cond, src2, dest);
        move(src2, dest);
        return branchAdd64(cond, src1, dest);
    }

    Jump branchAdd64(ResultCondition cond, Address src1, RegisterID src2, RegisterID dest)
    {
        move(src2, dest);
        return branchAdd64(cond, src1, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
    {
        move(src1, dest);
        return branchAdd64(cond, src2, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul64(src, dest);
        if (cond != Overflow)
            m_assembler.testq_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul64(cond, src2, dest);
        move(src2, dest);
        return branchMul64(cond, src1, dest);
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        neg64(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);

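        // If dest aliases neither input, copy the else value into dest first so a single cmov on
        // the condition can overwrite it with the then value; otherwise cmov the non-dest input,
        // inverting the condition when dest already holds the then value. The same pattern is
        // used by the conditional moves below.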
        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        m_assembler.cmpq_ir(right.m_value, left);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
    {
        m_assembler.testq_rr(testReg, mask);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        ASSERT(isInvertible(cond));
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");

        m_assembler.testq_rr(right, left);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testq_rr(testReg, testReg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, testReg);
        else
            m_assembler.testq_i32r(mask.m_value, testReg);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        ASSERT(isInvertible(cond));
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");

        if (mask.m_value == -1)
            m_assembler.testq_rr(testReg, testReg);
        else if (!(mask.m_value & ~0x7f))
            m_assembler.testb_i8r(mask.m_value, testReg);
        else
            m_assembler.testq_i32r(mask.m_value, testReg);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    template<typename LeftType, typename RightType>
    void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");

        if (thenCase != dest && elseCase != dest) {
            moveDouble(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest) {
            Jump falseCase = branch64(invert(cond), left, right);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        } else {
            Jump trueCase = branch64(cond, left, right);
            moveDouble(elseCase, dest);
            trueCase.link(this);
        }
    }

    template<typename TestType, typename MaskType>
    void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");

        if (elseCase == dest && isInvertible(cond)) {
            Jump falseCase = branchTest64(invert(cond), test, mask);
            moveDouble(thenCase, dest);
            falseCase.link(this);
            return;
        }

        if (thenCase == dest) {
            Jump trueCase = branchTest64(cond, test, mask);
            moveDouble(elseCase, dest);
            trueCase.link(this);
            return;
        }

        Jump trueCase = branchTest64(cond, test, mask);
        moveDouble(elseCase, dest);
        Jump falseCase = jump();
        trueCase.link(this);
        moveDouble(thenCase, dest);
        falseCase.link(this);
    }

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::r11);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), X86Registers::r10);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

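    // movq_i64r always emits the 10-byte form with a full 64-bit immediate, so the pointer loaded
    // here can be repatched in place later.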
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.m_value, dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
        return branch64(cond, left, scratchRegister());
    }

    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister());
        dataLabel = DataLabel32(this);
        return branch32(cond, left, scratchRegister());
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister());
        store64(scratchRegister(), address);
        return label;
    }

    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
    {
        return PatchableJump(branch64(cond, reg, imm));
    }

    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return PatchableJump(branch64(cond, left, right));
    }

    using MacroAssemblerX86Common::branch8;
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister());
        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right);
    }

    using MacroAssemblerX86Common::branchTest8;
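    // ExtendedAddress carries a pointer-sized offset: materialize it in the scratch register and
    // index it by the base register so the full 64-bit address can be reached.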
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister());
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister());
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
    }

    bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; }
    RegisterID scratchRegisterForBlinding() { return scratchRegister(); }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

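    // The patchable compare sequences start with a move into the scratch register: a REX prefix,
    // an opcode byte, and an 8-byte (pointer) or 4-byte (int32) immediate. Stepping back over
    // those bytes from the data label gives the start of the instruction that a jump replacement
    // overwrites.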
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

private:
    // If lzcnt is not available, use this after BSR
    // to count the leading zeros.
    void clz64AfterBsr(RegisterID dst)
    {
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
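        // When the source is zero, bsr sets ZF and leaves its destination undefined, so report 64
        // leading zeros explicitly.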
        move(TrustedImm32(64), dst);

        Jump skipNonZeroCase = jump();
        srcIsNonZero.link(this);
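        // The source was non-zero: bsr produced the index of the highest set bit, and XOR with
        // 0x3f (63) converts that index into the leading-zero count.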
        xor64(TrustedImm32(0x3f), dst);
        skipNonZeroCase.link(this);
    }

    friend class LinkBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
        else if (call.isFlagSet(Call::Tail))
            X86Assembler::linkJump(code, call.m_label, function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86_64_h