/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
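    // The X86Assembler::Condition codes used below fit in the low four bits,
    // so two extra flag bits can be packed alongside them in a DoubleCondition:
    // 'invert' requests that the operands of the ucomisd comparison be swapped,
    // and 'special' requests extra NaN-aware handling in branchDouble(). The
    // COMPILE_ASSERT after the DoubleCondition enum verifies the bits are disjoint.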
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:

    enum Condition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE,
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be an Imm32, and the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
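    //
    // For example (illustrative only; regT0 stands for whatever register a
    // client has allocated):
    //     add32(Imm32(1), regT0);                          // regT0 += 1
    //     sub32(regT0, Address(stackPointerRegister, 4));  // *(esp + 4) -= regT0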

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(Imm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shll_CLr(X86Registers::ecx);
            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.shll_CLr(shift_amount);
            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shll_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.shll_CLr(dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(Imm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.sarl_CLr(X86Registers::ecx);
            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.sarl_CLr(shift_amount);
            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.sarl_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.sarl_CLr(dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }


    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(Imm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be an Imm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
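    //
    // For example (illustrative only; regT0/regT1 stand for allocated registers):
    //     load32(Address(regT0, 8), regT1);   // regT1 = *(int32_t*)(regT0 + 8)
    //     store32(Imm32(42), regT0);          // *(int32_t*)regT0 = 42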

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
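    //
    // All of these operations assert isSSE2Present(); e.g. (illustrative only,
    // with regT0/fpRegT0/fpRegT1 as placeholder registers):
    //     loadDouble(Address(regT0), fpRegT0);   // fpRegT0 = *(double*)regT0
    //     addDouble(fpRegT1, fpRegT0);           // fpRegT0 += fpRegT1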

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

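        // ucomisd sets ZF, PF and CF all to 1 for an unordered comparison
        // (i.e. when either operand is NaN), so a bare je/jne would misreport
        // NaN operands; the jp checks below filter out the unordered case
        // first, since PF is only set when the result is unordered.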
        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
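        // cvttsd2si produces 0x80000000 (the "integer indefinite" value) when
        // the source is NaN or outside the int32 range, so that value doubles
        // as the failure sentinel here, at the cost of also rejecting a
        // genuine INT_MIN result, as noted above.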
        return branch32(Equal, dest, Imm32(0x80000000));
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to a double & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    void zeroDouble(FPRegisterID srcDest)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(srcDest, srcDest);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register; it
        // may be useful to have a separate version that sign extends the value?
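        // Zeroing via xorl is shorter than movl $0 and breaks any dependency
        // on the previous value of the register.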
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held
    // in reg1, when treated as a signed 32bit value, is less than or equal to 5.
    //
    // The Zero and NonZero conditions test whether the first operand is equal
    // to zero, and take an optional second operand of a mask under which to
    // perform the test.
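    //
    // For example (illustrative), branchTest32(NonZero, regT0, Imm32(0x7f))
    // returns a Jump taken when any of the low seven bits of regT0 are set.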

public:
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
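    //
    // For example (illustrative), branchAdd32(Overflow, regT1, regT0) performs
    // regT0 += regT1 and returns a Jump taken if the signed addition overflowed.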

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

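    // Note: the set8 operations write the 0/1 result of setCC only to the low
    // byte of dest, leaving the upper bits unchanged; callers that need a full
    // 32-bit boolean should use set32, which zero-extends via movzbl.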
    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_mr(left.offset, left.base, right);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
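        // setCC writes only the low byte of dest; movzbl zero-extends the
        // resulting 0/1 byte to the full 32-bit register.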
        m_assembler.movzbl_rr(dest, dest);
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

protected:
    X86Assembler::Condition x86Condition(Condition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
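            // ebx is saved and restored by hand around cpuid because on x86
            // it can hold the PIC global offset table pointer, which GCC
            // assumes inline asm will not clobber.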
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds this method is provided to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h
