/*
 * Copyright (C) 2008, 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/Optional.h>

#if COMPILER(MSVC)
#include <intrin.h>
#endif

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
public:
#if CPU(X86_64)
    // Use this directly only if you're not generating code with it.
    static const X86Registers::RegisterID s_scratchRegister = X86Registers::r11;

    // Use this when generating code, so that any use of the scratch register
    // while it is disallowed is caught.
    X86Registers::RegisterID scratchRegister()
    {
        RELEASE_ASSERT(m_allowScratchRegister);
        return s_scratchRegister;
    }
#endif
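
    // Illustrative usage sketch (not part of the interface): code that needs a
    // temporary on x86-64 asks for the scratch register through the accessor,
    // which asserts that scratch use is currently permitted. Assumes a
    // MacroAssemblerX86Common& jit in a context where the scratch is allowed.
    //
    //     jit.move(TrustedImm64(0x1234), jit.scratchRegister());
    //     // If m_allowScratchRegister were false, the accessor would
    //     // RELEASE_ASSERT rather than silently hand out r11.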

protected:
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    // FIXME: it would be neat to rename this to FloatingPointCondition in every assembler.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

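    // How the encoding above plays out (illustrative): the low bits of a
    // DoubleCondition are a plain X86Assembler::Condition. DoubleConditionBitInvert
    // means the comparison is emitted with its operands swapped, and
    // DoubleConditionBitSpecial requests extra parity-flag handling so the
    // unordered (NaN) case resolves the way the condition's name promises.
    // For example, DoubleLessThan is ConditionA | DoubleConditionBitInvert:
    // a < b is evaluated as b > a, and because UCOMISD sets both CF and ZF on
    // an unordered compare, the "above" test correctly fails when either input
    // is NaN.
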
    static const RegisterID stackPointerRegister = X86Registers::esp;
    static const RegisterID framePointerRegister = X86Registers::ebp;

    static bool canBlind() { return true; }
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer arithmetic operations:
    //
    // Operations are typically two-operand: operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).

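    // Usage sketch (illustrative only, not part of the interface; assumes a
    // MacroAssemblerX86Common& jit):
    //
    //     jit.add32(TrustedImm32(1), X86Registers::eax);                // eax += 1 (emits inc)
    //     jit.add32(Address(X86Registers::ebp, 8), X86Registers::eax);  // eax += *(ebp + 8)
    //     jit.sub32(X86Registers::ecx, Address(X86Registers::esp, 0));  // *(esp + 0) -= ecx
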
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add8(TrustedImm32 imm, Address address)
    {
        m_assembler.addb_im(imm.m_value, address.offset, address.base);
    }

    void add8(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.addb_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add16(TrustedImm32 imm, Address address)
    {
        m_assembler.addw_im(imm.m_value, address.offset, address.base);
    }

    void add16(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.addw_im(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.inc_r(dest);
        else
            m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(RegisterID src, BaseIndex dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add8(RegisterID src, Address dest)
    {
        m_assembler.addb_rm(src, dest.offset, dest.base);
    }

    void add8(RegisterID src, BaseIndex dest)
    {
        m_assembler.addb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add16(RegisterID src, Address dest)
    {
        m_assembler.addw_rm(src, dest.offset, dest.base);
    }

    void add16(RegisterID src, BaseIndex dest)
    {
        m_assembler.addw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (!imm.m_value) {
            zeroExtend32ToPtr(src, dest);
            return;
        }

        if (src == dest) {
            add32(imm, dest);
            return;
        }

        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        x86Lea32(BaseIndex(a, b, TimesOne), dest);
    }

    void x86Lea32(BaseIndex index, RegisterID dest)
    {
        if (!index.scale && !index.offset) {
            if (index.base == dest) {
                add32(index.index, dest);
                return;
            }
            if (index.index == dest) {
                add32(index.base, dest);
                return;
            }
        }
        m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest);
    }
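
    // Illustrative lowering: the three-operand add32 above folds into a single
    // LEA when neither source aliases the destination, e.g. add32(eax, ebx, ecx)
    // becomes "leal (%eax,%ebx), %ecx", while add32(eax, ebx, eax) falls back
    // to a plain two-operand add of ebx into eax.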

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move32IfNeeded(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(Address op1, RegisterID op2, RegisterID dest)
    {
        move32IfNeeded(op2, dest);
        and32(op1, dest);
    }

    void and32(RegisterID op1, Address op2, RegisterID dest)
    {
        move32IfNeeded(op1, dest);
        and32(op2, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        and32(imm, dest);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcnt_rr(src, dst);
            return;
        }
        m_assembler.bsr_rr(src, dst);
        clz32AfterBsr(dst);
    }

    void countLeadingZeros32(Address src, RegisterID dst)
    {
        if (supportsLZCNT()) {
            m_assembler.lzcnt_mr(src.offset, src.base, dst);
            return;
        }
        m_assembler.bsr_mr(src.offset, src.base, dst);
        clz32AfterBsr(dst);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            ASSERT(shift_amount != dest);
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }
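
    // Illustrative sequence for lshift32(esi, eax), i.e. a shift amount that is
    // not already in ecx:
    //
    //     xchgl %esi, %ecx    ; move the shift amount into ecx
    //     shll  %cl, %eax     ; shift the destination by cl
    //     xchgl %esi, %ecx    ; restore both registers
    //
    // When the destination itself is ecx, the shift is applied to the swapped
    // register instead; the second xchg then leaves the shifted value in ecx.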

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        move32IfNeeded(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src2 == dest) {
            m_assembler.imull_rr(src1, dest);
            return;
        }
        move32IfNeeded(src1, dest);
        m_assembler.imull_rr(src2, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Address src1, RegisterID src2, RegisterID dest)
    {
        move32IfNeeded(src2, dest);
        mul32(src1, dest);
    }

    void mul32(RegisterID src1, Address src2, RegisterID dest)
    {
        move32IfNeeded(src1, dest);
        mul32(src2, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void x86ConvertToDoubleWord32()
    {
        m_assembler.cdq();
    }

    void x86ConvertToDoubleWord32(RegisterID eax, RegisterID edx)
    {
        ASSERT_UNUSED(eax, eax == X86Registers::eax);
        ASSERT_UNUSED(edx, edx == X86Registers::edx);
        x86ConvertToDoubleWord32();
    }

    void x86Div32(RegisterID denominator)
    {
        m_assembler.idivl_r(denominator);
    }

    void x86Div32(RegisterID eax, RegisterID edx, RegisterID denominator)
    {
        ASSERT_UNUSED(eax, eax == X86Registers::eax);
        ASSERT_UNUSED(edx, edx == X86Registers::edx);
        x86Div32(denominator);
    }
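
    // Illustrative signed-division sequence (assumes dividend already in eax,
    // edx free to receive the sign extension, and a MacroAssemblerX86Common& jit):
    //
    //     jit.x86ConvertToDoubleWord32();     // cdq: sign-extend eax into edx:eax
    //     jit.x86Div32(X86Registers::ecx);    // idiv: quotient -> eax, remainder -> edx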

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move32IfNeeded(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(Address op1, RegisterID op2, RegisterID dest)
    {
        move32IfNeeded(op2, dest);
        or32(op1, dest);
    }

    void or32(RegisterID op1, Address op2, RegisterID dest)
    {
        move32IfNeeded(op1, dest);
        or32(op2, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            ASSERT(shift_amount != dest);

            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        move32IfNeeded(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            ASSERT(shift_amount != dest);

            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        move32IfNeeded(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.dec_r(dest);
        else
            m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move32IfNeeded(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(Address op1, RegisterID op2, RegisterID dest)
    {
        move32IfNeeded(op2, dest);
        xor32(op1, dest);
    }

    void xor32(RegisterID op1, Address op2, RegisterID dest)
    {
        move32IfNeeded(op1, dest);
        xor32(op2, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move32IfNeeded(src, dest);
        xor32(imm, dest);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address dest)
    {
        m_assembler.notl_m(dest.offset, dest.base);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void sqrtDouble(Address src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_mr(src.offset, src.base, dst);
    }

    void sqrtFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtss_rr(src, dst);
    }

    void sqrtFloat(Address src, FPRegisterID dst)
    {
        m_assembler.sqrtss_mr(src.offset, src.base, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }
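
    // Both helpers above work by first materializing the sign-bit mask -0.0
    // (0x8000000000000000) in dst: ANDNPD then clears the sign bit of src
    // (absolute value), while XORPD flips it (negation). This is also why
    // src and dst must differ: dst is clobbered by the constant load first.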

    void ceilDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void ceilDouble(Address src, FPRegisterID dst)
    {
        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void ceilFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void ceilFloat(Address src, FPRegisterID dst)
    {
        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void floorDouble(Address src, FPRegisterID dst)
    {
        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void floorFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    void floorFloat(Address src, FPRegisterID dst)
    {
        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.

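    // Usage sketch (illustrative only, not part of the interface; assumes a
    // MacroAssemblerX86Common& jit):
    //
    //     jit.load32(Address(X86Registers::ebp, 16), X86Registers::eax);
    //     jit.load32(BaseIndex(X86Registers::ebp, X86Registers::ecx, TimesFour, 16), X86Registers::eax);
    //     jit.store32(TrustedImm32(42), Address(X86Registers::ebp, 16));
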
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void zeroExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movzbl_rr(src, dest);
    }

    void signExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movsbl_rr(src, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16SignedExtendTo32(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    void zeroExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movzwl_rr(src, dest);
    }

    void signExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.movswl_rr(src, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void storeZero32(ImplicitAddress address)
    {
        store32(TrustedImm32(0), address);
    }

    void storeZero32(BaseIndex address)
    {
        store32(TrustedImm32(0), address);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        m_assembler.movb_i8m(static_cast<int8_t>(imm.m_value), address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movb_i8m(static_cast<int8_t>(imm.m_value), address.offset, address.base, address.index, address.scale);
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
    {
        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
            return X86Registers::eax;

        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
            return X86Registers::ebx;

        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
        return X86Registers::ecx;
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
    {
        if (address.base != X86Registers::eax)
            return X86Registers::eax;

        ASSERT(address.base != X86Registers::edx);
        return X86Registers::edx;
    }

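    // Illustrative consequence for the byte stores below: on 32-bit x86 only
    // registers with ids 0..3 (eax, ecx, edx, ebx) have byte forms, so
    // store8(esi, address) is emitted roughly as
    //
    //     xchgl %esi, %eax    ; eax picked because the address doesn't use it
    //     movb  %al, (...)    ; store the low byte
    //     xchgl %esi, %eax    ; restore both registers
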
    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base);
    }



    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

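    // Usage sketch (illustrative only, not part of the interface; assumes a
    // MacroAssemblerX86Common& jit and SSE register ids such as X86Registers::xmm0):
    //
    //     jit.loadDouble(Address(X86Registers::ebp, 24), X86Registers::xmm0);
    //     jit.addDouble(X86Registers::xmm1, X86Registers::xmm0);  // xmm0 += xmm1
    //     jit.storeDouble(X86Registers::xmm0, Address(X86Registers::ebp, 24));
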
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movaps_rr(src, dest);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.m_value, dest);
#else
        move(address, scratchRegister());
        loadDouble(scratchRegister(), dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertDoubleToFloat(Address address, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_mr(address.offset, address.base, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void convertFloatToDouble(Address address, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_mr(address.offset, address.base, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void addDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op2 == dest) {
            addDouble(op1, dest);
            return;
        }

        loadDouble(op1, dest);
        addDouble(op2, dest);
    }

    void addDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest) {
            addDouble(op2, dest);
            return;
        }

        loadDouble(op2, dest);
        addDouble(op1, dest);
    }

    void addFloat(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addss_rr(src, dest);
    }

    void addFloat(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addss_mr(src.offset, src.base, dest);
    }

    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addFloat(op2, dest);
        else {
            moveDouble(op2, dest);
            addFloat(op1, dest);
        }
    }

    void addFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op2 == dest) {
            addFloat(op1, dest);
            return;
        }

        loadFloat(op1, dest);
        addFloat(op2, dest);
    }

    void addFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest) {
            addFloat(op2, dest);
            return;
        }

        loadFloat(op2, dest);
        addFloat(op1, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void divFloat(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divss_rr(src, dest);
    }

    void divFloat(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divss_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void subFloat(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subss_rr(src, dest);
    }

    void subFloat(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subss_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op2 == dest) {
            mulDouble(op1, dest);
            return;
        }
        loadDouble(op1, dest);
        mulDouble(op2, dest);
    }

    void mulDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest) {
            mulDouble(op2, dest);
            return;
        }
        loadDouble(op2, dest);
        mulDouble(op1, dest);
    }

    void mulFloat(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulss_rr(src, dest);
    }

    void mulFloat(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulss_mr(src.offset, src.base, dest);
    }

    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulFloat(op2, dest);
        else {
            moveDouble(op2, dest);
            mulFloat(op1, dest);
        }
    }

    void mulFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op2 == dest) {
            mulFloat(op1, dest);
            return;
        }
        loadFloat(op1, dest);
        mulFloat(op2, dest);
    }

    void mulFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest) {
            mulFloat(op2, dest);
            return;
        }
        loadFloat(op2, dest);
        mulFloat(op1, dest);
    }

    void andDouble(FPRegisterID src, FPRegisterID dst)
    {
        // ANDPS is defined on 128 bits and is shorter than ANDPD.
        m_assembler.andps_rr(src, dst);
    }

    void andDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            andDouble(src2, dst);
        else {
            moveDouble(src2, dst);
            andDouble(src1, dst);
        }
    }

    void andFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.andps_rr(src, dst);
    }

    void andFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            andFloat(src2, dst);
        else {
            moveDouble(src2, dst);
            andFloat(src1, dst);
        }
    }

    void xorDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.xorps_rr(src, dst);
    }

    void xorDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            xorDouble(src2, dst);
        else {
            moveDouble(src2, dst);
            xorDouble(src1, dst);
        }
    }

    void xorFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.xorps_rr(src, dst);
    }

    void xorFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
    {
        if (src1 == dst)
            xorFloat(src2, dst);
        else {
            moveDouble(src2, dst);
            xorFloat(src1, dst);
        }
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);
        return jumpAfterFloatingPointCompare(cond, left, right);
    }

    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomiss_rr(left, right);
        else
            m_assembler.ucomiss_rr(right, left);
        return jumpAfterFloatingPointCompare(cond, left, right);
    }

    // Truncates 'src' to an integer and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
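
    // CVTTSD2SI reports an out-of-range or NaN input by producing the "integer
    // indefinite" value 0x80000000 (INT_MIN), so the branch above simply compares
    // against that sentinel; this is also why a legitimate result of INT_MIN is
    // treated as a failure.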

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
#if CPU(X86_64)
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            m_assembler.movmskpd_rr(src, scratchRegister());
            failureCases.append(branchTest32(NonZero, scratchRegister(), TrustedImm32(1)));
            valueIsNonZero.link(this);
        }
#else
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
#endif

        // Convert the integer result back to a double and compare with the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }
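
    // Why the extra negative-zero check above: cvttsd2si(-0.0) yields 0 and the
    // round-trip compare 0.0 == -0.0 succeeds, so -0.0 would otherwise be
    // accepted silently as integer 0. MOVMSKPD extracts the sign bit so that
    // case can be routed to failureCases when negZeroCheck is requested.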

    void moveZeroToDouble(FPRegisterID reg)
    {
        m_assembler.xorps_rr(reg, reg);
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void move32ToFloat(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void moveFloatTo32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

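    // Usage sketch (illustrative only, not part of the interface; assumes a
    // MacroAssemblerX86Common& jit):
    //
    //     jit.push(X86Registers::ebp);     // save a register
    //     jit.push(TrustedImm32(0));       // push an immediate
    //     jit.pop(X86Registers::ebp);      // restore into a register
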
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        if (!imm.m_value)
            m_assembler.xorq_rr(dest, dest);
        else
            m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        if (!imm.m_value)
            m_assembler.xorq_rr(dest, dest);
        else
            m_assembler.movq_i64r(imm.m_value, dest);
    }

    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);
        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
    }

    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        ASSERT(isSSE2Present());

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        RegisterID src;
        if (elseCase == dest)
            src = thenCase;
        else {
            cond = invert(cond);
            src = elseCase;
        }

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);
        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
    }

    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomiss_rr(left, right);
        else
            m_assembler.ucomiss_rr(right, left);
        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
    }

    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        ASSERT(isSSE2Present());

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        RegisterID src;
        if (elseCase == dest)
            src = thenCase;
        else {
            cond = invert(cond);
            src = elseCase;
        }

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomiss_rr(left, right);
        else
            m_assembler.ucomiss_rr(right, left);
        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            m_assembler.xorq_rr(dest, dest);
        else
            m_assembler.mov_i32r(imm.m_value, dest);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }

    void zeroExtend32ToPtr(TrustedImm32 src, RegisterID dest)
    {
        m_assembler.movl_i32r(src.m_value, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right) {
                m_assembler.cmovnpl_rr(src, dest);
                return;
            }

            Jump isUnordered(m_assembler.jp());
            m_assembler.cmovel_rr(src, dest);
            isUnordered.link(this);
            return;
        }

        if (cond == DoubleNotEqualOrUnordered) {
            if (left == right) {
                m_assembler.cmovpl_rr(src, dest);
                return;
            }

            m_assembler.cmovpl_rr(src, dest);
            m_assembler.cmovnel_rr(src, dest);
            return;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif

    void swap32(RegisterID src, RegisterID dest)
    {
        m_assembler.xchgl_rr(src, dest);
    }

    void swap32(RegisterID src, Address dest)
    {
        m_assembler.xchgl_rm(src, dest.offset, dest.base);
    }

    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        cmov(x86Condition(cond), src, dest);
    }

    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);

        if (thenCase != dest && elseCase != dest) {
            move(elseCase, dest);
            elseCase = dest;
        }

        if (elseCase == dest)
            cmov(x86Condition(cond), thenCase, dest);
        else
            cmov(x86Condition(invert(cond)), elseCase, dest);
    }

    void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase,