/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
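    // The invert and special bits do not correspond to x86 condition codes; they are
    // consumed by branchDouble() below. The invert bit swaps the operand order of the
    // ucomisd comparison, and the special bit selects the extra parity-flag handling
    // required for DoubleEqual and DoubleNotEqualOrUnordered.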

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

#if ENABLE(JIT_CONSTANT_BLINDING)
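    // Note: the 0x00ffffff threshold below is the point at which an immediate is
    // considered large enough to be worth blinding; smaller constants are assumed
    // not to be useful as attacker-controlled values and are emitted as-is.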
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
#if OS(DARWIN) // On 64-bit systems other than Darwin, uint64_t and uintptr_t are the same type, so the overload is prohibited.
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
#endif

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).
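    //
    // A hypothetical usage sketch ('masm' names a macro assembler instance; the
    // register and offset choices are arbitrary):
    //
    //     masm.add32(TrustedImm32(4), X86Registers::eax);                // eax += 4
    //     masm.add32(X86Registers::eax, Address(X86Registers::esp, 8));  // [esp+8] += eax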

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        x86Lea32(BaseIndex(a, b, TimesOne), dest);
    }

    void x86Lea32(BaseIndex index, RegisterID dest)
    {
        if (!index.scale && !index.offset) {
            if (index.base == dest) {
                add32(index.index, dest);
                return;
            }
            if (index.index == dest) {
                add32(index.base, dest);
                return;
            }
        }
        m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest) {
            mul32(op1, dest);
        } else {
            move(op1, dest);
            mul32(op2, dest);
        }
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.xorpd_rr(src, dst);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32.
    // Address operand objects for loads and stores will be implicitly constructed
    // if a register is passed.
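    //
    // A hypothetical usage sketch (register and offset choices are arbitrary):
    //
    //     masm.load32(Address(X86Registers::esp, 4), X86Registers::eax); // eax = [esp+4]
    //     masm.store32(TrustedImm32(0), Address(X86Registers::esp, 4));  // [esp+4] = 0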

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(ImplicitAddress address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE2, not x87 floating point.
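    //
    // A hypothetical usage sketch (register and offset choices are arbitrary):
    //
    //     masm.loadDouble(Address(X86Registers::esp, 8), X86Registers::xmm0); // xmm0 = [esp+8]
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);             // xmm0 += xmm1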

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
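
    // Note: cvttsd2si writes 0x80000000 to the destination when the source is NaN or
    // out of int32 range, which is why the tests below compare the result against
    // 0x80000000 (and why INT_MIN itself is treated as a failure).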
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
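    //
    // A hypothetical usage sketch:
    //
    //     masm.push(X86Registers::eax); // decrement esp, then store eax at [esp]
    //     masm.pop(X86Registers::edx);  // load [esp] into edx, then increment esp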

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.
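    //
    // A hypothetical usage sketch:
    //
    //     masm.move(TrustedImm32(42), X86Registers::eax);  // eax = 42
    //     masm.move(X86Registers::eax, X86Registers::edx); // edx = eax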

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
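    //
    // A hypothetical usage sketch, looping while eax < 10 (Label and linkTo are
    // provided by AbstractMacroAssembler):
    //
    //     Label top = masm.label();
    //     // ... loop body ...
    //     masm.branch32(LessThan, X86Registers::eax, TrustedImm32(10)).linkTo(top, &masm);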

public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
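    //
    // A hypothetical usage sketch, diverting to a slow path when an addition
    // overflows:
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, TrustedImm32(1), X86Registers::eax);
    //     // ... fast path continues here; link 'overflowed' to the slow path later.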

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
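    // This relies on the x86 condition codes coming in complementary pairs that
    // differ only in their low bit (e.g. E/NE, L/GE), so XORing with 1 flips a
    // condition to its inverse.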
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but debug builds define this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h