1/*
2 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <Platform.h>
30
31#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
32
33#include "AssemblerBuffer.h"
34#include "AbstractMacroAssembler.h"
35#include "JITCompilationEffort.h"
36#include <stdint.h>
37#include <wtf/Assertions.h>
38#include <wtf/Vector.h>
39
40namespace JSC {
41
// True when 'value' survives a round-trip through a sign-extended 8-bit
// immediate, i.e. value is in [-128, 127] and the short imm8 encoding can be used.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return static_cast<int32_t>(static_cast<int8_t>(value)) == value; }
43
namespace X86Registers {
    // General-purpose registers, in hardware encoding order: the enumerator
    // value is the 3-bit (4-bit with REX on x86-64) register number used in
    // ModRM/SIB bytes and in "opcode + r" forms.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        // r8-r15 need a REX prefix bit; only available in 64-bit mode.
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
        none = 0xff, // sentinel: "no register" (not a valid encoding)
    } RegisterID;

    // SSE registers, likewise in hardware encoding order.
    // Note: only xmm0-xmm7 are enumerated, even on x86-64.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
79
80class X86Assembler {
81public:
82 typedef X86Registers::RegisterID RegisterID;
83 typedef X86Registers::XMMRegisterID XMMRegisterID;
84 typedef XMMRegisterID FPRegisterID;
85
    // x86 condition codes in hardware encoding order: each enumerator is the
    // value added to the Jcc (0F 80+cc) and SETcc (0F 90+cc) base opcodes.
    typedef enum {
        ConditionO,
        ConditionNO,
        ConditionB,
        ConditionAE,
        ConditionE,
        ConditionNE,
        ConditionBE,
        ConditionA,
        ConditionS,
        ConditionNS,
        ConditionP,
        ConditionNP,
        ConditionL,
        ConditionGE,
        ConditionLE,
        ConditionG,

        ConditionC = ConditionB,   // carry == below (unsigned <)
        ConditionNC = ConditionAE, // no-carry == above-or-equal (unsigned >=)
    } Condition;
107
108private:
    // Single-byte x86 opcodes and instruction prefixes. Naming follows the
    // Intel manual's operand-form suffixes (Ev/Gv = r/m and reg word operands,
    // Eb/Gb = byte operands, Iz/Ib = imm32/imm8); PRE_* entries are prefixes.
    typedef enum {
        OP_ADD_EvGv = 0x01,
        OP_ADD_GvEv = 0x03,
        OP_OR_EvGv = 0x09,
        OP_OR_GvEv = 0x0B,
        OP_2BYTE_ESCAPE = 0x0F,
        OP_AND_EvGv = 0x21,
        OP_AND_GvEv = 0x23,
        OP_SUB_EvGv = 0x29,
        OP_SUB_GvEv = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
        OP_XOR_EvGv = 0x31,
        OP_XOR_GvEv = 0x33,
        OP_CMP_EvGv = 0x39,
        OP_CMP_GvEv = 0x3B,
#if CPU(X86_64)
        PRE_REX = 0x40,
#endif
        OP_PUSH_EAX = 0x50,
        OP_POP_EAX = 0x58,
#if CPU(X86_64)
        OP_MOVSXD_GvEv = 0x63,
#endif
        PRE_OPERAND_SIZE = 0x66, // 0x66 doubles as the 16-bit operand-size
        PRE_SSE_66 = 0x66,       // prefix and the SSE "66" mandatory prefix.
        OP_PUSH_Iz = 0x68,
        OP_IMUL_GvEvIz = 0x69,
        OP_GROUP1_EbIb = 0x80,
        OP_GROUP1_EvIz = 0x81,
        OP_GROUP1_EvIb = 0x83,
        OP_TEST_EbGb = 0x84,
        OP_TEST_EvGv = 0x85,
        OP_XCHG_EvGv = 0x87,
        OP_MOV_EbGb = 0x88,
        OP_MOV_EvGv = 0x89,
        OP_MOV_GvEv = 0x8B,
        OP_LEA = 0x8D,
        OP_GROUP1A_Ev = 0x8F,
        OP_NOP = 0x90,
        OP_CDQ = 0x99,
        OP_MOV_EAXOv = 0xA1,
        OP_MOV_OvEAX = 0xA3,
        OP_MOV_EAXIv = 0xB8,
        OP_GROUP2_EvIb = 0xC1,
        OP_RET = 0xC3,
        OP_GROUP11_EvIb = 0xC6,
        OP_GROUP11_EvIz = 0xC7,
        OP_INT3 = 0xCC,
        OP_GROUP2_Ev1 = 0xD1,
        OP_GROUP2_EvCL = 0xD3,
        OP_ESCAPE_DD = 0xDD,
        OP_CALL_rel32 = 0xE8,
        OP_JMP_rel32 = 0xE9,
        PRE_SSE_F2 = 0xF2,
        PRE_SSE_F3 = 0xF3,
        OP_HLT = 0xF4,
        OP_GROUP3_EbIb = 0xF6,
        OP_GROUP3_Ev = 0xF7,
        OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev = 0xFF,
    } OneByteOpcodeID;
170
    // Second byte of two-byte (0F-prefixed) opcodes. Several mnemonics share
    // an encoding and are distinguished only by a mandatory prefix (F2/F3/66)
    // emitted separately — hence duplicate values like MOVSD/MOVSS.
    typedef enum {
        OP2_MOVSD_VsdWsd = 0x10,
        OP2_MOVSD_WsdVsd = 0x11,
        OP2_MOVSS_VsdWsd = 0x10,
        OP2_MOVSS_WsdVsd = 0x11,
        OP2_CVTSI2SD_VsdEd = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd = 0x2E,
        OP2_ADDSD_VsdWsd = 0x58,
        OP2_MULSD_VsdWsd = 0x59,
        OP2_CVTSD2SS_VsdWsd = 0x5A,
        OP2_CVTSS2SD_VsdWsd = 0x5A,
        OP2_SUBSD_VsdWsd = 0x5C,
        OP2_DIVSD_VsdWsd = 0x5E,
        OP2_SQRTSD_VsdWsd = 0x51,
        OP2_ANDNPD_VpdWpd = 0x55,
        OP2_XORPD_VpdWpd = 0x57,
        OP2_MOVD_VdEd = 0x6E,
        OP2_MOVD_EdVd = 0x7E,
        OP2_JCC_rel32 = 0x80,
        OP_SETCC = 0x90, // NOTE(review): breaks the OP2_ naming convention; kept because it is referenced elsewhere.
        OP2_IMUL_GvEv = 0xAF,
        OP2_MOVZX_GvEb = 0xB6,
        OP2_MOVSX_GvEb = 0xBE,
        OP2_MOVZX_GvEw = 0xB7,
        OP2_MOVSX_GvEw = 0xBF,
        OP2_PEXTRW_GdUdIb = 0xC5,
        OP2_PSLLQ_UdqIb = 0x73, // PSLLQ/PSRLQ share 0x73; the ModRM /reg field
        OP2_PSRLQ_UdqIb = 0x73, // (GROUP14_OP_*) selects the operation.
        OP2_POR_VdqWdq = 0XEB,
    } TwoByteOpcodeID;
202
203 TwoByteOpcodeID jccRel32(Condition cond)
204 {
205 return (TwoByteOpcodeID)(int(OP2_JCC_rel32) + cond);
206 }
207
208 TwoByteOpcodeID setccOpcode(Condition cond)
209 {
210 return (TwoByteOpcodeID)(int(OP_SETCC) + cond);
211 }
212
    // "Group" opcode extensions: for group instructions the operation is
    // selected by the 3-bit /reg field of the ModRM byte; these are those
    // /reg values (passed where a RegisterID is normally emitted).
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_ROL = 0,
        GROUP2_OP_ROR = 1,
        GROUP2_OP_RCL = 2,
        GROUP2_OP_RCR = 3,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SHR = 5,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT = 2,
        GROUP3_OP_NEG = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN = 4,
        GROUP5_OP_PUSH = 6,

        GROUP11_MOV = 0,

        GROUP14_OP_PSLLQ = 6,
        GROUP14_OP_PSRLQ = 2,

        ESCAPE_DD_FSTP_doubleReal = 3,
    } GroupOpcodeID;
249
250 class X86InstructionFormatter;
251public:
252
    // INT_MIN is the "no watchpoint recorded yet" sentinel for both indices
    // (presumably maintained by watchpoint-emission code outside this chunk —
    // verify against the rest of the class).
    X86Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }
258
259 // Stack operations:
260
261 void push_r(RegisterID reg)
262 {
263 m_formatter.oneByteOp(opcode: OP_PUSH_EAX, reg);
264 }
265
266 void pop_r(RegisterID reg)
267 {
268 m_formatter.oneByteOp(opcode: OP_POP_EAX, reg);
269 }
270
271 void push_i32(int imm)
272 {
273 m_formatter.oneByteOp(opcode: OP_PUSH_Iz);
274 m_formatter.immediate32(imm);
275 }
276
277 void push_m(int offset, RegisterID base)
278 {
279 m_formatter.oneByteOp(opcode: OP_GROUP5_Ev, reg: GROUP5_OP_PUSH, base, offset);
280 }
281
282 void pop_m(int offset, RegisterID base)
283 {
284 m_formatter.oneByteOp(opcode: OP_GROUP1A_Ev, reg: GROUP1A_OP_POP, base, offset);
285 }
286
287 // Arithmetic operations:
288
289#if !CPU(X86_64)
290 void adcl_im(int imm, const void* addr)
291 {
292 if (CAN_SIGN_EXTEND_8_32(imm)) {
293 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
294 m_formatter.immediate8(imm);
295 } else {
296 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
297 m_formatter.immediate32(imm);
298 }
299 }
300#endif
301
302 void addl_rr(RegisterID src, RegisterID dst)
303 {
304 m_formatter.oneByteOp(opcode: OP_ADD_EvGv, reg: src, rm: dst);
305 }
306
307 void addl_mr(int offset, RegisterID base, RegisterID dst)
308 {
309 m_formatter.oneByteOp(opcode: OP_ADD_GvEv, reg: dst, base, offset);
310 }
311
312#if !CPU(X86_64)
    // ADD dst, [addr] — 32-bit add from an absolute address (x86-32 only).
    void addl_mr(const void* addr, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
    }
317#endif
318
319 void addl_rm(RegisterID src, int offset, RegisterID base)
320 {
321 m_formatter.oneByteOp(opcode: OP_ADD_EvGv, reg: src, base, offset);
322 }
323
324 void addl_ir(int imm, RegisterID dst)
325 {
326 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
327 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_ADD, rm: dst);
328 m_formatter.immediate8(imm);
329 } else {
330 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_ADD, rm: dst);
331 m_formatter.immediate32(imm);
332 }
333 }
334
335 void addl_im(int imm, int offset, RegisterID base)
336 {
337 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
338 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_ADD, base, offset);
339 m_formatter.immediate8(imm);
340 } else {
341 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_ADD, base, offset);
342 m_formatter.immediate32(imm);
343 }
344 }
345
346#if CPU(X86_64)
347 void addq_rr(RegisterID src, RegisterID dst)
348 {
349 m_formatter.oneByteOp64(opcode: OP_ADD_EvGv, reg: src, rm: dst);
350 }
351
352 void addq_mr(int offset, RegisterID base, RegisterID dst)
353 {
354 m_formatter.oneByteOp64(opcode: OP_ADD_GvEv, reg: dst, base, offset);
355 }
356
357 void addq_ir(int imm, RegisterID dst)
358 {
359 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
360 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_ADD, rm: dst);
361 m_formatter.immediate8(imm);
362 } else {
363 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_ADD, rm: dst);
364 m_formatter.immediate32(imm);
365 }
366 }
367
368 void addq_im(int imm, int offset, RegisterID base)
369 {
370 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
371 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_ADD, base, offset);
372 m_formatter.immediate8(imm);
373 } else {
374 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_ADD, base, offset);
375 m_formatter.immediate32(imm);
376 }
377 }
378#else
379 void addl_im(int imm, const void* addr)
380 {
381 if (CAN_SIGN_EXTEND_8_32(imm)) {
382 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
383 m_formatter.immediate8(imm);
384 } else {
385 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
386 m_formatter.immediate32(imm);
387 }
388 }
389#endif
390
391 void andl_rr(RegisterID src, RegisterID dst)
392 {
393 m_formatter.oneByteOp(opcode: OP_AND_EvGv, reg: src, rm: dst);
394 }
395
396 void andl_mr(int offset, RegisterID base, RegisterID dst)
397 {
398 m_formatter.oneByteOp(opcode: OP_AND_GvEv, reg: dst, base, offset);
399 }
400
401 void andl_rm(RegisterID src, int offset, RegisterID base)
402 {
403 m_formatter.oneByteOp(opcode: OP_AND_EvGv, reg: src, base, offset);
404 }
405
406 void andl_ir(int imm, RegisterID dst)
407 {
408 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
409 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_AND, rm: dst);
410 m_formatter.immediate8(imm);
411 } else {
412 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_AND, rm: dst);
413 m_formatter.immediate32(imm);
414 }
415 }
416
417 void andl_im(int imm, int offset, RegisterID base)
418 {
419 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
420 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_AND, base, offset);
421 m_formatter.immediate8(imm);
422 } else {
423 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_AND, base, offset);
424 m_formatter.immediate32(imm);
425 }
426 }
427
428#if CPU(X86_64)
429 void andq_rr(RegisterID src, RegisterID dst)
430 {
431 m_formatter.oneByteOp64(opcode: OP_AND_EvGv, reg: src, rm: dst);
432 }
433
434 void andq_ir(int imm, RegisterID dst)
435 {
436 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
437 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_AND, rm: dst);
438 m_formatter.immediate8(imm);
439 } else {
440 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_AND, rm: dst);
441 m_formatter.immediate32(imm);
442 }
443 }
444#else
445 void andl_im(int imm, const void* addr)
446 {
447 if (CAN_SIGN_EXTEND_8_32(imm)) {
448 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
449 m_formatter.immediate8(imm);
450 } else {
451 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
452 m_formatter.immediate32(imm);
453 }
454 }
455#endif
456
457 void negl_r(RegisterID dst)
458 {
459 m_formatter.oneByteOp(opcode: OP_GROUP3_Ev, reg: GROUP3_OP_NEG, rm: dst);
460 }
461
462#if CPU(X86_64)
463 void negq_r(RegisterID dst)
464 {
465 m_formatter.oneByteOp64(opcode: OP_GROUP3_Ev, reg: GROUP3_OP_NEG, rm: dst);
466 }
467#endif
468
469 void negl_m(int offset, RegisterID base)
470 {
471 m_formatter.oneByteOp(opcode: OP_GROUP3_Ev, reg: GROUP3_OP_NEG, base, offset);
472 }
473
474 void notl_r(RegisterID dst)
475 {
476 m_formatter.oneByteOp(opcode: OP_GROUP3_Ev, reg: GROUP3_OP_NOT, rm: dst);
477 }
478
479 void notl_m(int offset, RegisterID base)
480 {
481 m_formatter.oneByteOp(opcode: OP_GROUP3_Ev, reg: GROUP3_OP_NOT, base, offset);
482 }
483
484 void orl_rr(RegisterID src, RegisterID dst)
485 {
486 m_formatter.oneByteOp(opcode: OP_OR_EvGv, reg: src, rm: dst);
487 }
488
489 void orl_mr(int offset, RegisterID base, RegisterID dst)
490 {
491 m_formatter.oneByteOp(opcode: OP_OR_GvEv, reg: dst, base, offset);
492 }
493
494 void orl_rm(RegisterID src, int offset, RegisterID base)
495 {
496 m_formatter.oneByteOp(opcode: OP_OR_EvGv, reg: src, base, offset);
497 }
498
499 void orl_ir(int imm, RegisterID dst)
500 {
501 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
502 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_OR, rm: dst);
503 m_formatter.immediate8(imm);
504 } else {
505 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_OR, rm: dst);
506 m_formatter.immediate32(imm);
507 }
508 }
509
510 void orl_im(int imm, int offset, RegisterID base)
511 {
512 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
513 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_OR, base, offset);
514 m_formatter.immediate8(imm);
515 } else {
516 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_OR, base, offset);
517 m_formatter.immediate32(imm);
518 }
519 }
520
521#if CPU(X86_64)
522 void orq_rr(RegisterID src, RegisterID dst)
523 {
524 m_formatter.oneByteOp64(opcode: OP_OR_EvGv, reg: src, rm: dst);
525 }
526
527 void orq_ir(int imm, RegisterID dst)
528 {
529 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
530 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_OR, rm: dst);
531 m_formatter.immediate8(imm);
532 } else {
533 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_OR, rm: dst);
534 m_formatter.immediate32(imm);
535 }
536 }
537#else
538 void orl_im(int imm, const void* addr)
539 {
540 if (CAN_SIGN_EXTEND_8_32(imm)) {
541 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
542 m_formatter.immediate8(imm);
543 } else {
544 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
545 m_formatter.immediate32(imm);
546 }
547 }
548
    // OR [addr], src — 32-bit OR into an absolute address (x86-32 only).
    void orl_rm(RegisterID src, const void* addr)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
    }
553#endif
554
555 void subl_rr(RegisterID src, RegisterID dst)
556 {
557 m_formatter.oneByteOp(opcode: OP_SUB_EvGv, reg: src, rm: dst);
558 }
559
560 void subl_mr(int offset, RegisterID base, RegisterID dst)
561 {
562 m_formatter.oneByteOp(opcode: OP_SUB_GvEv, reg: dst, base, offset);
563 }
564
565 void subl_rm(RegisterID src, int offset, RegisterID base)
566 {
567 m_formatter.oneByteOp(opcode: OP_SUB_EvGv, reg: src, base, offset);
568 }
569
570 void subl_ir(int imm, RegisterID dst)
571 {
572 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
573 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_SUB, rm: dst);
574 m_formatter.immediate8(imm);
575 } else {
576 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_SUB, rm: dst);
577 m_formatter.immediate32(imm);
578 }
579 }
580
581 void subl_im(int imm, int offset, RegisterID base)
582 {
583 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
584 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_SUB, base, offset);
585 m_formatter.immediate8(imm);
586 } else {
587 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_SUB, base, offset);
588 m_formatter.immediate32(imm);
589 }
590 }
591
592#if CPU(X86_64)
593 void subq_rr(RegisterID src, RegisterID dst)
594 {
595 m_formatter.oneByteOp64(opcode: OP_SUB_EvGv, reg: src, rm: dst);
596 }
597
598 void subq_ir(int imm, RegisterID dst)
599 {
600 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
601 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_SUB, rm: dst);
602 m_formatter.immediate8(imm);
603 } else {
604 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_SUB, rm: dst);
605 m_formatter.immediate32(imm);
606 }
607 }
608#else
609 void subl_im(int imm, const void* addr)
610 {
611 if (CAN_SIGN_EXTEND_8_32(imm)) {
612 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
613 m_formatter.immediate8(imm);
614 } else {
615 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
616 m_formatter.immediate32(imm);
617 }
618 }
619#endif
620
621 void xorl_rr(RegisterID src, RegisterID dst)
622 {
623 m_formatter.oneByteOp(opcode: OP_XOR_EvGv, reg: src, rm: dst);
624 }
625
626 void xorl_mr(int offset, RegisterID base, RegisterID dst)
627 {
628 m_formatter.oneByteOp(opcode: OP_XOR_GvEv, reg: dst, base, offset);
629 }
630
631 void xorl_rm(RegisterID src, int offset, RegisterID base)
632 {
633 m_formatter.oneByteOp(opcode: OP_XOR_EvGv, reg: src, base, offset);
634 }
635
636 void xorl_im(int imm, int offset, RegisterID base)
637 {
638 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
639 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_XOR, base, offset);
640 m_formatter.immediate8(imm);
641 } else {
642 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_XOR, base, offset);
643 m_formatter.immediate32(imm);
644 }
645 }
646
647 void xorl_ir(int imm, RegisterID dst)
648 {
649 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
650 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_XOR, rm: dst);
651 m_formatter.immediate8(imm);
652 } else {
653 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_XOR, rm: dst);
654 m_formatter.immediate32(imm);
655 }
656 }
657
658#if CPU(X86_64)
659 void xorq_rr(RegisterID src, RegisterID dst)
660 {
661 m_formatter.oneByteOp64(opcode: OP_XOR_EvGv, reg: src, rm: dst);
662 }
663
664 void xorq_ir(int imm, RegisterID dst)
665 {
666 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
667 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_XOR, rm: dst);
668 m_formatter.immediate8(imm);
669 } else {
670 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_XOR, rm: dst);
671 m_formatter.immediate32(imm);
672 }
673 }
674
675 void xorq_rm(RegisterID src, int offset, RegisterID base)
676 {
677 m_formatter.oneByteOp64(opcode: OP_XOR_EvGv, reg: src, base, offset);
678 }
679
680 void rorq_i8r(int imm, RegisterID dst)
681 {
682 if (imm == 1)
683 m_formatter.oneByteOp64(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_ROR, rm: dst);
684 else {
685 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_ROR, rm: dst);
686 m_formatter.immediate8(imm);
687 }
688 }
689
690 void sarq_CLr(RegisterID dst)
691 {
692 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvCL, reg: GROUP2_OP_SAR, rm: dst);
693 }
694
695 void sarq_i8r(int imm, RegisterID dst)
696 {
697 if (imm == 1)
698 m_formatter.oneByteOp64(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_SAR, rm: dst);
699 else {
700 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_SAR, rm: dst);
701 m_formatter.immediate8(imm);
702 }
703 }
704
705 void shrq_i8r(int imm, RegisterID dst)
706 {
707 // ### doesn't work when removing the "0 &&"
708 if (0 && imm == 1)
709 m_formatter.oneByteOp64(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_SHR, rm: dst);
710 else {
711 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_SHR, rm: dst);
712 m_formatter.immediate8(imm);
713 }
714 }
715
716 void shrq_CLr(RegisterID dst)
717 {
718 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvCL, reg: GROUP2_OP_SHR, rm: dst);
719 }
720
721 void shlq_i8r(int imm, RegisterID dst)
722 {
723 // ### doesn't work when removing the "0 &&"
724 if (0 && imm == 1)
725 m_formatter.oneByteOp64(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_SHL, rm: dst);
726 else {
727 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_SHL, rm: dst);
728 m_formatter.immediate8(imm);
729 }
730 }
731
732 void shlq_CLr(RegisterID dst)
733 {
734 m_formatter.oneByteOp64(opcode: OP_GROUP2_EvCL, reg: GROUP2_OP_SHL, rm: dst);
735 }
736#endif
737
738 void sarl_i8r(int imm, RegisterID dst)
739 {
740 if (imm == 1)
741 m_formatter.oneByteOp(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_SAR, rm: dst);
742 else {
743 m_formatter.oneByteOp(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_SAR, rm: dst);
744 m_formatter.immediate8(imm);
745 }
746 }
747
748 void sarl_CLr(RegisterID dst)
749 {
750 m_formatter.oneByteOp(opcode: OP_GROUP2_EvCL, reg: GROUP2_OP_SAR, rm: dst);
751 }
752
753 void shrl_i8r(int imm, RegisterID dst)
754 {
755 if (imm == 1)
756 m_formatter.oneByteOp(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_SHR, rm: dst);
757 else {
758 m_formatter.oneByteOp(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_SHR, rm: dst);
759 m_formatter.immediate8(imm);
760 }
761 }
762
763 void shrl_CLr(RegisterID dst)
764 {
765 m_formatter.oneByteOp(opcode: OP_GROUP2_EvCL, reg: GROUP2_OP_SHR, rm: dst);
766 }
767
768 void shll_i8r(int imm, RegisterID dst)
769 {
770 if (imm == 1)
771 m_formatter.oneByteOp(opcode: OP_GROUP2_Ev1, reg: GROUP2_OP_SHL, rm: dst);
772 else {
773 m_formatter.oneByteOp(opcode: OP_GROUP2_EvIb, reg: GROUP2_OP_SHL, rm: dst);
774 m_formatter.immediate8(imm);
775 }
776 }
777
778 void shll_CLr(RegisterID dst)
779 {
780 m_formatter.oneByteOp(opcode: OP_GROUP2_EvCL, reg: GROUP2_OP_SHL, rm: dst);
781 }
782
783 void imull_rr(RegisterID src, RegisterID dst)
784 {
785 m_formatter.twoByteOp(opcode: OP2_IMUL_GvEv, reg: dst, rm: src);
786 }
787
788 void imull_mr(int offset, RegisterID base, RegisterID dst)
789 {
790 m_formatter.twoByteOp(opcode: OP2_IMUL_GvEv, reg: dst, base, offset);
791 }
792
793 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
794 {
795 m_formatter.oneByteOp(opcode: OP_IMUL_GvEvIz, reg: dst, rm: src);
796 m_formatter.immediate32(imm: value);
797 }
798
799 void idivl_r(RegisterID dst)
800 {
801 m_formatter.oneByteOp(opcode: OP_GROUP3_Ev, reg: GROUP3_OP_IDIV, rm: dst);
802 }
803
804 // Comparisons:
805
806 void cmpl_rr(RegisterID src, RegisterID dst)
807 {
808 m_formatter.oneByteOp(opcode: OP_CMP_EvGv, reg: src, rm: dst);
809 }
810
811 void cmpl_rm(RegisterID src, int offset, RegisterID base)
812 {
813 m_formatter.oneByteOp(opcode: OP_CMP_EvGv, reg: src, base, offset);
814 }
815
816 void cmpl_mr(int offset, RegisterID base, RegisterID src)
817 {
818 m_formatter.oneByteOp(opcode: OP_CMP_GvEv, reg: src, base, offset);
819 }
820
821 void cmpl_ir(int imm, RegisterID dst)
822 {
823 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
824 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, rm: dst);
825 m_formatter.immediate8(imm);
826 } else {
827 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, rm: dst);
828 m_formatter.immediate32(imm);
829 }
830 }
831
832 void cmpl_ir_force32(int imm, RegisterID dst)
833 {
834 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, rm: dst);
835 m_formatter.immediate32(imm);
836 }
837
838 void cmpl_im(int imm, int offset, RegisterID base)
839 {
840 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
841 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, base, offset);
842 m_formatter.immediate8(imm);
843 } else {
844 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, base, offset);
845 m_formatter.immediate32(imm);
846 }
847 }
848
849 void cmpb_im(int imm, int offset, RegisterID base)
850 {
851 m_formatter.oneByteOp(opcode: OP_GROUP1_EbIb, reg: GROUP1_OP_CMP, base, offset);
852 m_formatter.immediate8(imm);
853 }
854
855 void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
856 {
857 m_formatter.oneByteOp(opcode: OP_GROUP1_EbIb, reg: GROUP1_OP_CMP, base, index, scale, offset);
858 m_formatter.immediate8(imm);
859 }
860
861#if CPU(X86)
    // CMP byte ptr [addr], imm8 — absolute address form (x86-32 only).
    void cmpb_im(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
        m_formatter.immediate8(imm);
    }
867#endif
868
869 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
870 {
871 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
872 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, base, index, scale, offset);
873 m_formatter.immediate8(imm);
874 } else {
875 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, base, index, scale, offset);
876 m_formatter.immediate32(imm);
877 }
878 }
879
880 void cmpl_im_force32(int imm, int offset, RegisterID base)
881 {
882 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, base, offset);
883 m_formatter.immediate32(imm);
884 }
885
886#if CPU(X86_64)
887 void cmpq_rr(RegisterID src, RegisterID dst)
888 {
889 m_formatter.oneByteOp64(opcode: OP_CMP_EvGv, reg: src, rm: dst);
890 }
891
892 void cmpq_rm(RegisterID src, int offset, RegisterID base)
893 {
894 m_formatter.oneByteOp64(opcode: OP_CMP_EvGv, reg: src, base, offset);
895 }
896
897 void cmpq_mr(int offset, RegisterID base, RegisterID src)
898 {
899 m_formatter.oneByteOp64(opcode: OP_CMP_GvEv, reg: src, base, offset);
900 }
901
902 void cmpq_ir(int imm, RegisterID dst)
903 {
904 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
905 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, rm: dst);
906 m_formatter.immediate8(imm);
907 } else {
908 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, rm: dst);
909 m_formatter.immediate32(imm);
910 }
911 }
912
913 void cmpq_im(int imm, int offset, RegisterID base)
914 {
915 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
916 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, base, offset);
917 m_formatter.immediate8(imm);
918 } else {
919 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, base, offset);
920 m_formatter.immediate32(imm);
921 }
922 }
923
924 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
925 {
926 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
927 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, base, index, scale, offset);
928 m_formatter.immediate8(imm);
929 } else {
930 m_formatter.oneByteOp64(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, base, index, scale, offset);
931 m_formatter.immediate32(imm);
932 }
933 }
934#else
    // CMP [addr], reg — 32-bit compare against an absolute address (x86-32 only).
    void cmpl_rm(RegisterID reg, const void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }
939
940 void cmpl_im(int imm, const void* addr)
941 {
942 if (CAN_SIGN_EXTEND_8_32(imm)) {
943 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
944 m_formatter.immediate8(imm);
945 } else {
946 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
947 m_formatter.immediate32(imm);
948 }
949 }
950#endif
951
952 void cmpw_ir(int imm, RegisterID dst)
953 {
954 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
955 m_formatter.prefix(pre: PRE_OPERAND_SIZE);
956 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, rm: dst);
957 m_formatter.immediate8(imm);
958 } else {
959 m_formatter.prefix(pre: PRE_OPERAND_SIZE);
960 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, rm: dst);
961 m_formatter.immediate16(imm);
962 }
963 }
964
965 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
966 {
967 m_formatter.prefix(pre: PRE_OPERAND_SIZE);
968 m_formatter.oneByteOp(opcode: OP_CMP_EvGv, reg: src, base, index, scale, offset);
969 }
970
971 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
972 {
973 if (CAN_SIGN_EXTEND_8_32(value: imm)) {
974 m_formatter.prefix(pre: PRE_OPERAND_SIZE);
975 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIb, reg: GROUP1_OP_CMP, base, index, scale, offset);
976 m_formatter.immediate8(imm);
977 } else {
978 m_formatter.prefix(pre: PRE_OPERAND_SIZE);
979 m_formatter.oneByteOp(opcode: OP_GROUP1_EvIz, reg: GROUP1_OP_CMP, base, index, scale, offset);
980 m_formatter.immediate16(imm);
981 }
982 }
983
984 void testl_rr(RegisterID src, RegisterID dst)
985 {
986 m_formatter.oneByteOp(opcode: OP_TEST_EvGv, reg: src, rm: dst);
987 }
988
989 void testl_i32r(int imm, RegisterID dst)
990 {
991 m_formatter.oneByteOp(opcode: OP_GROUP3_EvIz, reg: GROUP3_OP_TEST, rm: dst);
992 m_formatter.immediate32(imm);
993 }
994
995 void testl_i32m(int imm, int offset, RegisterID base)
996 {
997 m_formatter.oneByteOp(opcode: OP_GROUP3_EvIz, reg: GROUP3_OP_TEST, base, offset);
998 m_formatter.immediate32(imm);
999 }
1000
1001 void testb_rr(RegisterID src, RegisterID dst)
1002 {
1003 m_formatter.oneByteOp8(opcode: OP_TEST_EbGb, reg: src, rm: dst);
1004 }
1005
1006 void testb_im(int imm, int offset, RegisterID base)
1007 {
1008 m_formatter.oneByteOp(opcode: OP_GROUP3_EbIb, reg: GROUP3_OP_TEST, base, offset);
1009 m_formatter.immediate8(imm);
1010 }
1011
1012 void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
1013 {
1014 m_formatter.oneByteOp(opcode: OP_GROUP3_EbIb, reg: GROUP3_OP_TEST, base, index, scale, offset);
1015 m_formatter.immediate8(imm);
1016 }
1017
1018#if CPU(X86)
    // TEST byte ptr [addr], imm8 — absolute address form (x86-32 only).
    void testb_im(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
        m_formatter.immediate8(imm);
    }
1024#endif
1025
1026 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1027 {
1028 m_formatter.oneByteOp(opcode: OP_GROUP3_EvIz, reg: GROUP3_OP_TEST, base, index, scale, offset);
1029 m_formatter.immediate32(imm);
1030 }
1031
1032#if CPU(X86_64)
1033 void testq_rr(RegisterID src, RegisterID dst)
1034 {
1035 m_formatter.oneByteOp64(opcode: OP_TEST_EvGv, reg: src, rm: dst);
1036 }
1037
1038 void testq_rm(RegisterID src, int offset, RegisterID base)
1039 {
1040 m_formatter.oneByteOp64(opcode: OP_TEST_EvGv, reg: src, base, offset);
1041 }
1042
1043 void testq_i32r(int imm, RegisterID dst)
1044 {
1045 m_formatter.oneByteOp64(opcode: OP_GROUP3_EvIz, reg: GROUP3_OP_TEST, rm: dst);
1046 m_formatter.immediate32(imm);
1047 }
1048
1049 void testq_i32m(int imm, int offset, RegisterID base)
1050 {
1051 m_formatter.oneByteOp64(opcode: OP_GROUP3_EvIz, reg: GROUP3_OP_TEST, base, offset);
1052 m_formatter.immediate32(imm);
1053 }
1054
1055 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1056 {
1057 m_formatter.oneByteOp64(opcode: OP_GROUP3_EvIz, reg: GROUP3_OP_TEST, base, index, scale, offset);
1058 m_formatter.immediate32(imm);
1059 }
1060#endif
1061
1062 void testw_rr(RegisterID src, RegisterID dst)
1063 {
1064 m_formatter.prefix(pre: PRE_OPERAND_SIZE);
1065 m_formatter.oneByteOp(opcode: OP_TEST_EvGv, reg: src, rm: dst);
1066 }
1067
1068 void testb_i8r(int imm, RegisterID dst)
1069 {
1070 m_formatter.oneByteOp8(opcode: OP_GROUP3_EbIb, groupOp: GROUP3_OP_TEST, rm: dst);
1071 m_formatter.immediate8(imm);
1072 }
1073
1074 void setCC_r(Condition cond, RegisterID dst)
1075 {
1076 m_formatter.twoByteOp8(opcode: setccOpcode(cond), groupOp: (GroupOpcodeID)0, rm: dst);
1077 }
1078
1079 void sete_r(RegisterID dst)
1080 {
1081 m_formatter.twoByteOp8(opcode: setccOpcode(ConditionE), groupOp: (GroupOpcodeID)0, rm: dst);
1082 }
1083
1084 void setz_r(RegisterID dst)
1085 {
1086 sete_r(dst);
1087 }
1088
1089 void setne_r(RegisterID dst)
1090 {
1091 m_formatter.twoByteOp8(opcode: setccOpcode(ConditionNE), groupOp: (GroupOpcodeID)0, rm: dst);
1092 }
1093
1094 void setnz_r(RegisterID dst)
1095 {
1096 setne_r(dst);
1097 }
1098
1099 // Various move ops:
1100
1101 void cdq()
1102 {
1103 m_formatter.oneByteOp(opcode: OP_CDQ);
1104 }
1105
1106 void fstpl(int offset, RegisterID base)
1107 {
1108 m_formatter.oneByteOp(opcode: OP_ESCAPE_DD, reg: ESCAPE_DD_FSTP_doubleReal, base, offset);
1109 }
1110
1111 void xchgl_rr(RegisterID src, RegisterID dst)
1112 {
1113 m_formatter.oneByteOp(opcode: OP_XCHG_EvGv, reg: src, rm: dst);
1114 }
1115
1116#if CPU(X86_64)
1117 void xchgq_rr(RegisterID src, RegisterID dst)
1118 {
1119 m_formatter.oneByteOp64(opcode: OP_XCHG_EvGv, reg: src, rm: dst);
1120 }
1121#endif
1122
    // 32-bit (and byte/word) MOV emitters. Naming: _rr register-register,
    // _rm register-to-memory, _mr memory-to-register, _i32m/_i8m immediate
    // to memory; _disp32/_disp8 force a particular displacement width.

    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_EvGv, reg: src, rm: dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_EvGv, reg: src, base, offset);
    }

    // Force a 32-bit displacement even when offset would fit in 8 bits
    // (used when the offset will be patched later).
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(opcode: OP_MOV_EvGv, reg: src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_EvGv, reg: src, base, index, scale, offset);
    }

    // Load eax/rax from an absolute address (moffs form; the address is
    // an inline pointer-width immediate).
    void movl_mEAX(const void* addr)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(imm: reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_GvEv, reg: dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(opcode: OP_MOV_GvEv, reg: dst, base, offset);
    }

    void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp8(opcode: OP_MOV_GvEv, reg: dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_GvEv, reg: dst, base, index, scale, offset);
    }

    // Short-form mov imm32 to register (opcode + register in low bits).
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_EAXIv, reg: dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(opcode: OP_GROUP11_EvIz, reg: GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(opcode: OP_GROUP11_EvIz, reg: GROUP11_MOV, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if !CPU(X86_64)
    // Byte store of imm8 to an absolute address (32-bit targets only).
    void movb_i8m(int imm, const void* addr)
    {
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
        m_formatter.immediate8(imm);
    }
#endif

    void movb_i8m(int imm, int offset, RegisterID base)
    {
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(opcode: OP_GROUP11_EvIb, reg: GROUP11_MOV, base, offset);
        m_formatter.immediate8(imm);
    }

    void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(opcode: OP_GROUP11_EvIb, reg: GROUP11_MOV, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    // Byte store from a register; the 8-bit-aware formatter handles the
    // byte-register encoding constraints.
    void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp8(opcode: OP_MOV_EbGb, reg: src, base, index, scale, offset);
    }

    // Word store from a register.
    // NOTE(review): uses oneByteOp8 rather than oneByteOp — presumably to
    // get the same REX treatment as the byte form; confirm intent.
    void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(pre: PRE_OPERAND_SIZE);
        m_formatter.oneByteOp8(opcode: OP_MOV_EvGv, reg: src, base, index, scale, offset);
    }

    // Store eax/rax to an absolute address (moffs form).
    void movl_EAXm(const void* addr)
    {
        m_formatter.oneByteOp(opcode: OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(imm: reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
1234
1235#if CPU(X86_64)
    // 64-bit MOV emitters (REX.W forms), x86-64 only.

    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_EvGv, reg: src, rm: dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_EvGv, reg: src, base, offset);
    }

    // Force a 32-bit displacement (for later patching).
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(opcode: OP_MOV_EvGv, reg: src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_EvGv, reg: src, base, index, scale, offset);
    }

    // Load rax from an absolute 64-bit address (moffs form).
    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_EAXOv);
        m_formatter.immediate64(imm: reinterpret_cast<int64_t>(addr));
    }

    // Store rax to an absolute 64-bit address (moffs form).
    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_OvEAX);
        m_formatter.immediate64(imm: reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_GvEv, reg: dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(opcode: OP_MOV_GvEv, reg: dst, base, offset);
    }

    void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp8(opcode: OP_MOV_GvEv, reg: dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_GvEv, reg: dst, base, index, scale, offset);
    }

    // Store an imm32 to a 64-bit memory slot (the CPU sign-extends the
    // immediate to 64 bits).
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(opcode: OP_GROUP11_EvIz, reg: GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // Full 64-bit immediate load (movabs form).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_MOV_EAXIv, reg: dst);
        m_formatter.immediate64(imm);
    }

    // Sign-extend a 32-bit register into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_MOVSXD_GvEv, reg: dst, rm: src);
    }

1304
1305
1306#else
    // Absolute-address MOV forms, 32-bit targets only. The eax cases use
    // the shorter moffs encodings.
    void movl_rm(RegisterID src, const void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    void movl_mr(const void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
1328#endif
1329
    // Zero/sign-extending loads (movzx/movsx) and address computation (lea).

    // Zero-extend a 16-bit load into dst.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVZX_GvEw, reg: dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVZX_GvEw, reg: dst, base, index, scale, offset);
    }

    // Sign-extend a 16-bit load into dst.
    void movswl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEw, reg: dst, base, offset);
    }

    void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEw, reg: dst, base, index, scale, offset);
    }

    // Zero-extend an 8-bit load into dst.
    void movzbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVZX_GvEb, reg: dst, base, offset);
    }

    void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVZX_GvEb, reg: dst, base, index, scale, offset);
    }

    // Sign-extend an 8-bit load into dst.
    void movsbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEb, reg: dst, base, offset);
    }

    void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEb, reg: dst, base, index, scale, offset);
    }

    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(opcode: OP2_MOVZX_GvEb, reg: dst, rm: src);
    }

    // lea: compute the effective address of the memory operand into dst.
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_LEA, reg: dst, base, offset);
    }

    void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_LEA, reg: dst, base, index, scale, offset);
    }
1387
1388#if CPU(X86_64)
    // 64-bit lea forms (REX.W).
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_LEA, reg: dst, base, offset);
    }

    void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(opcode: OP_LEA, reg: dst, base, index, scale, offset);
    }
1398#endif
1399
1400 // Flow control:
1401
    // Emit a rel32 call; the returned label addresses the displacement
    // field so the call can be linked to its target later.
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(opcode: OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register; the label marks the return address
    // position (just after the instruction).
    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_GROUP5_Ev, reg: GROUP5_OP_CALLN, rm: dst);
        return m_formatter.label();
    }

    // Indirect call through memory.
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(opcode: OP_GROUP5_Ev, reg: GROUP5_OP_CALLN, base, offset);
    }

    // Emit a rel32 jump; the returned label addresses the displacement.
    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(opcode: OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return a AssemblerLabel so we have a label to the jump, so we can use this
    // To make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(opcode: OP_GROUP5_Ev, reg: GROUP5_OP_JMPN, rm: dst);
        return m_formatter.label();
    }

    // Indirect jump through memory.
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(opcode: OP_GROUP5_Ev, reg: GROUP5_OP_JMPN, base, offset);
    }

#if !CPU(X86_64)
    // Indirect jump through an absolute address (32-bit targets only).
    void jmp_m(const void* address)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
    }
#endif
1445
1446 AssemblerLabel jne()
1447 {
1448 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionNE));
1449 return m_formatter.immediateRel32();
1450 }
1451
1452 AssemblerLabel jnz()
1453 {
1454 return jne();
1455 }
1456
1457 AssemblerLabel je()
1458 {
1459 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionE));
1460 return m_formatter.immediateRel32();
1461 }
1462
1463 AssemblerLabel jz()
1464 {
1465 return je();
1466 }
1467
1468 AssemblerLabel jl()
1469 {
1470 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionL));
1471 return m_formatter.immediateRel32();
1472 }
1473
1474 AssemblerLabel jb()
1475 {
1476 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionB));
1477 return m_formatter.immediateRel32();
1478 }
1479
1480 AssemblerLabel jle()
1481 {
1482 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionLE));
1483 return m_formatter.immediateRel32();
1484 }
1485
1486 AssemblerLabel jbe()
1487 {
1488 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionBE));
1489 return m_formatter.immediateRel32();
1490 }
1491
1492 AssemblerLabel jge()
1493 {
1494 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionGE));
1495 return m_formatter.immediateRel32();
1496 }
1497
1498 AssemblerLabel jg()
1499 {
1500 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionG));
1501 return m_formatter.immediateRel32();
1502 }
1503
1504 AssemblerLabel ja()
1505 {
1506 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionA));
1507 return m_formatter.immediateRel32();
1508 }
1509
1510 AssemblerLabel jae()
1511 {
1512 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionAE));
1513 return m_formatter.immediateRel32();
1514 }
1515
1516 AssemblerLabel jo()
1517 {
1518 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionO));
1519 return m_formatter.immediateRel32();
1520 }
1521
1522 AssemblerLabel jnp()
1523 {
1524 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionNP));
1525 return m_formatter.immediateRel32();
1526 }
1527
1528 AssemblerLabel jp()
1529 {
1530 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionP));
1531 return m_formatter.immediateRel32();
1532 }
1533
1534 AssemblerLabel js()
1535 {
1536 m_formatter.twoByteOp(opcode: jccRel32(cond: ConditionS));
1537 return m_formatter.immediateRel32();
1538 }
1539
    // Generic conditional jump: plant the two-byte Jcc rel32 opcode for
    // |cond| and return a label addressing the displacement field.
    AssemblerLabel jCC(Condition cond)
    {
        m_formatter.twoByteOp(opcode: jccRel32(cond));
        return m_formatter.immediateRel32();
    }
1545
1546 // SSE operations:
1547
    // Scalar-double SSE2 arithmetic and int<->double conversions. The
    // PRE_SSE_F2/F3/66 prefixes select the scalar-double / scalar-single /
    // packed-66 instruction variants of the shared two-byte opcodes.

    // Emit: addsd src, dst.
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_ADDSD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    // Emit: addsd offset(base), dst.
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_ADDSD_VsdWsd, reg: (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    // Absolute-address form (32-bit targets only).
    void addsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
    }
#endif

    // Convert a 32-bit integer register to double.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_CVTSI2SD_VsdEd, reg: (RegisterID)dst, rm: src);
    }

    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_CVTSI2SD_VsdEd, reg: (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
#endif

#if CPU(X86_64)
    // 64-bit integer to double (REX.W form).
    void cvtsiq2sd_rr(RegisterID src, FPRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp64(opcode: OP2_CVTSI2SD_VsdEd, reg: (RegisterID)dst, rm: src);
    }

#endif

    // Truncating double-to-int conversion.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_CVTTSD2SI_GdWsd, reg: dst, rm: (RegisterID)src);
    }

    // Double to single precision.
    void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_CVTSD2SS_VsdWsd, reg: dst, rm: (RegisterID)src);
    }

    // Single to double precision.
    void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F3);
        m_formatter.twoByteOp(opcode: OP2_CVTSS2SD_VsdWsd, reg: dst, rm: (RegisterID)src);
    }

#if CPU(X86_64)
    // Truncating double to 64-bit integer (REX.W form).
    void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp64(opcode: OP2_CVTTSD2SI_GdWsd, reg: dst, rm: (RegisterID)src);
    }
#endif
1622
    // Data movement between XMM registers, GPRs, and memory.

    // Move the low 32 bits of an XMM register to a GPR.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_MOVD_EdVd, reg: (RegisterID)src, rm: dst);
    }

    // Move a GPR into the low 32 bits of an XMM register.
    void movd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_MOVD_VdEd, reg: (RegisterID)dst, rm: src);
    }

#if CPU(X86_64)
    // 64-bit XMM<->GPR moves (REX.W forms of movd).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp64(opcode: OP2_MOVD_EdVd, reg: (RegisterID)src, rm: dst);
    }

    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp64(opcode: OP2_MOVD_VdEd, reg: (RegisterID)dst, rm: src);
    }
#endif

    // Scalar-double register move.
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    // Scalar-double store to memory.
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_WsdVsd, reg: (RegisterID)src, base, offset);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_WsdVsd, reg: (RegisterID)src, base, index, scale, offset);
    }

    // Scalar-single store (F3 prefix selects movss on the same opcode).
    void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(pre: PRE_SSE_F3);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_WsdVsd, reg: (RegisterID)src, base, index, scale, offset);
    }

    // Scalar-double load from memory.
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_VsdWsd, reg: (RegisterID)dst, base, offset);
    }

    void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_VsdWsd, reg: dst, base, index, scale, offset);
    }

    // Scalar-single load.
    void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F3);
        m_formatter.twoByteOp(opcode: OP2_MOVSD_VsdWsd, reg: dst, base, index, scale, offset);
    }

#if !CPU(X86_64)
    // Absolute-address forms (32-bit targets only).
    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
    void movsd_rm(XMMRegisterID src, const void* address)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
    }
#endif
1703
    // Remaining scalar-double arithmetic, comparisons and bitwise XMM ops.

    // Emit: mulsd src, dst.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MULSD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    // Emit: mulsd offset(base), dst.
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_MULSD_VsdWsd, reg: (RegisterID)dst, base, offset);
    }

    // Extract the 16-bit word selected by whichWord from src into dst.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_PEXTRW_GdUdIb, reg: (RegisterID)dst, rm: (RegisterID)src);
        m_formatter.immediate8(imm: whichWord);
    }

    // Shift the 64-bit lanes of dst left by imm bits.
    void psllq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp8(opcode: OP2_PSLLQ_UdqIb, groupOp: GROUP14_OP_PSLLQ, rm: (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    // Shift the 64-bit lanes of dst right (logical) by imm bits.
    void psrlq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp8(opcode: OP2_PSRLQ_UdqIb, groupOp: GROUP14_OP_PSRLQ, rm: (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    // Bitwise OR of the full XMM registers.
    void por_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_POR_VdqWdq, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    // Emit: subsd src, dst.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_SUBSD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_SUBSD_VsdWsd, reg: (RegisterID)dst, base, offset);
    }

    // Unordered scalar-double compare, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_UCOMISD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_UCOMISD_VsdWsd, reg: (RegisterID)dst, base, offset);
    }

    // Emit: divsd src, dst.
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_DIVSD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_DIVSD_VsdWsd, reg: (RegisterID)dst, base, offset);
    }

    // Packed-double XOR (commonly used to zero or negate doubles).
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_XORPD_VpdWpd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    // Packed-double AND-NOT.
    void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_66);
        m_formatter.twoByteOp(opcode: OP2_ANDNPD_VpdWpd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }

    // Scalar-double square root.
    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(pre: PRE_SSE_F2);
        m_formatter.twoByteOp(opcode: OP2_SQRTSD_VsdWsd, reg: (RegisterID)dst, rm: (RegisterID)src);
    }
1796
1797 // Misc instructions:
1798
    // Software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(opcode: OP_INT3);
    }

    // Near return.
    void ret()
    {
        m_formatter.oneByteOp(opcode: OP_RET);
    }

    // Plant the branch-not-taken hint prefix before a conditional branch.
    void predictNotTaken()
    {
        m_formatter.prefix(pre: PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1813
1814 // Assembler admin methods:
1815
    // Number of bytes emitted so far.
    size_t codeSize() const
    {
        return m_formatter.codeSize();
    }

    // Take a label at a watchpoint site. If a different label was taken
    // since the last watchpoint, go through label() so the watchpoint's
    // replacement-jump region gets nop padding; then record where this
    // watchpoint's replaceable region ends.
    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_formatter.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }

    // Raw label with no watchpoint padding.
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_formatter.label();
    }

    // Take a label, emitting nops until we are clear of the tail of the
    // last watchpoint so that replacing the watchpoint with a jump cannot
    // overwrite code at this label.
    AssemblerLabel label()
    {
        AssemblerLabel result = m_formatter.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            nop();
            result = m_formatter.label();
        }
        return result;
    }

    // Pad with hlt bytes up to the requested alignment, then take a label.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(opcode: OP_HLT);

        return label();
    }
1853
1854 // Linking & patching:
1855 //
1856 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1857 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1858 // code has been finalized it is (platform support permitting) within a non-
1859 // writable region of memory; to modify the code in an execute-only execuable
1860 // pool the 'repatch' and 'relink' methods should be used.
1861
    // Link a jump recorded at |from| to the code position |to|, both
    // within the buffer currently being assembled. |from| addresses the
    // end of the rel32 field (labels are taken just after the field).
    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());

        char* code = reinterpret_cast<char*>(m_formatter.data());
        // The rel32 slot must still hold 0 — i.e. the jump has not been
        // linked already.
        ASSERT(!loadPossiblyUnaligned<int32_t>(code, from.m_offset, -1));
        setRel32(from: code + from.m_offset, to: code + to.m_offset);
    }
1871
1872 template<typename T>
1873 T loadPossiblyUnaligned(char *ptr, size_t offset, int idx)
1874 {
1875 T *t_ptr = &reinterpret_cast<T*>(ptr + offset)[idx];
1876 T val;
1877 memcpy(&val, t_ptr, sizeof(T));
1878 return val;
1879 }
1880
    // Static link/patch helpers operating on an already-copied code
    // buffer. By convention every AssemblerLabel / patch pointer here
    // addresses the byte just PAST the field being written, which is why
    // the setters index backwards ([-1]).

    // Link a jump in |code| to the absolute target |to|.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(from: reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Link a call in |code| to the absolute target |to|.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(from: reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Patch a pointer-width immediate at |where|.
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        ASSERT(where.isSet());

        setPointer(where: reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    // Re-point an existing rel32 jump at |from| to |to|.
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Re-point an existing rel32 call at |from| to |to|.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Patch an 8-bit displacement; the value must fit in int8_t.
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(value >= std::numeric_limits<int8_t>::min());
        ASSERT(value <= std::numeric_limits<int8_t>::max());
        setInt8(where, value);
    }

    // Patch a 32-bit immediate/displacement.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    // Patch a pointer-width immediate.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }

    // Read back the pointer-width immediate ending at |where|.
    static void* readPointer(void* where)
    {
        return reinterpret_cast<void**>(where)[-1];
    }
1933
1934 static void replaceWithJump(void* instructionStart, void* to)
1935 {
1936 uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
1937 uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
1938 intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
1939 ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
1940 *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
1941 }
1942
    // Size in bytes of the largest instruction a watchpoint site may be
    // replaced with: a 5-byte jmp rel32.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return 5;
    }
1947
1948#if CPU(X86_64)
    // Undo a jump replacement by rewriting the site as the original
    // movq imm64, dst: REX.W(+B) prefix, short-form MOV opcode with the
    // register in the low bits, then the imm64 bytes. Only the first
    // maxJumpReplacementSize() bytes are rewritten; the rest of the imm64
    // is assumed untouched by the jump.
    static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        // REX.W set; REX.B carries bit 3 of the register number.
        ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
        ptr[1] = OP_MOV_EAXIv | (dst & 7);

        union {
            uint64_t asWord;
            uint8_t asBytes[8];
        } u;
        u.asWord = imm;
        for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
    }
1966#endif
1967
    // Undo a jump replacement by rewriting the site as
    // cmpl imm32, dst (register form): group-1 opcode, ModRM with
    // register addressing and the CMP group op, then the imm32 bytes.
    // Only the first maxJumpReplacementSize() bytes are rewritten.
    static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = OP_GROUP1_EvIz;
        ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
        union {
            uint32_t asWord;
            uint8_t asBytes[4];
        } u;
        u.asWord = imm;
        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
    }

    // Same as above but for the memory form cmpl imm32, (dst) — offset
    // must be zero (no-displacement ModRM mode is encoded).
    static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
    {
        ASSERT_UNUSED(offset, !offset);
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = OP_GROUP1_EvIz;
        ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
        union {
            uint32_t asWord;
            uint8_t asBytes[4];
        } u;
        u.asWord = imm;
        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
    }
2002
    // Patch an instruction in place so that a lea becomes the equivalent
    // mov load (same operands). A leading REX prefix, if present, is
    // skipped; an already-mov instruction is left untouched.
    static void replaceWithLoad(void* instructionStart)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
#if CPU(X86_64)
        // REX prefixes occupy 0x40-0x4F; step over one if present.
        if ((*ptr & ~15) == PRE_REX)
            ptr++;
#endif
        switch (*ptr) {
        case OP_MOV_GvEv:
            break;
        case OP_LEA:
            *ptr = OP_MOV_GvEv;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Inverse of replaceWithLoad: turn a mov load back into the lea
    // address computation with the same operands.
    static void replaceWithAddressComputation(void* instructionStart)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
#if CPU(X86_64)
        // REX prefixes occupy 0x40-0x4F; step over one if present.
        if ((*ptr & ~15) == PRE_REX)
            ptr++;
#endif
        switch (*ptr) {
        case OP_MOV_GvEv:
            *ptr = OP_LEA;
            break;
        case OP_LEA:
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2038
    // Offset of the return address of a call (the label is taken just
    // after the call instruction).
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }

    // Absolute address of a label within relocated code.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b (negative if b precedes a).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    // Copy the assembled code into executable memory.
    PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
    {
        return m_formatter.executableCopy(globalData, ownerUID, effort);
    }

    unsigned debugOffset() { return m_formatter.debugOffset(); }
2062
    // Emit a single one-byte nop.
    void nop()
    {
        m_formatter.oneByteOp(opcode: OP_NOP);
    }

    // This is a no-op on x86
    ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
2070
2071private:
2072
    // Write a pointer-width immediate ending at |where| (patch pointers
    // address the byte just past the field, hence [-1]).
    static void setPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }

    // Write a 32-bit immediate ending at |where|; memcpy-based store
    // because the slot may be unaligned.
    static void setInt32(void* where, int32_t value)
    {
        storePossiblyUnaligned<int32_t>(where, idx: -1, value);
    }
2082
2083 template <typename T>
2084 static void storePossiblyUnaligned(void *where, int idx, T value)
2085 {
2086 T *ptr = &reinterpret_cast<T*>(where)[idx];
2087 memcpy(ptr, &value, sizeof(T));
2088 }
2089
    // Write an 8-bit immediate ending at |where|.
    static void setInt8(void* where, int8_t value)
    {
        reinterpret_cast<int8_t*>(where)[-1] = value;
    }

    // Write the rel32 displacement for a jump/call whose field ends at
    // |from|, targeting |to|. The distance must fit in 32 bits.
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(where: from, value: offset);
    }
2102
2103 class X86InstructionFormatter {
2104
2105 static const int maxInstructionSize = 16;
2106
2107 public:
2108
        // ModRM addressing modes, in the order of the ModRM mod-field
        // encodings (00 = memory/no displacement, 01 = disp8,
        // 10 = disp32, 11 = register).
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        // Legacy prefix bytes:
        //
        // These are emmitted prior to the instruction.

        // Plant a single legacy prefix byte (operand-size, SSE F2/F3/66,
        // branch hints, ...).
        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(value: pre);
        }
2124
2125 // Word-sized operands / no operand instruction formatters.
2126 //
2127 // In addition to the opcode, the following operand permutations are supported:
2128 // * None - instruction takes no operands.
2129 // * One register - the low three bits of the RegisterID are added into the opcode.
2130 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
2131 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
2132 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
2133 //
2134 // For 32-bit x86 targets, the address operand may also be provided as a void*.
2135 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
2136 //
2137 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
2138
        // oneByteOp overloads. Each reserves maxInstructionSize up front
        // so the subsequent *Unchecked puts cannot overflow, plants a REX
        // prefix when required (64-bit only), then the opcode and the
        // appropriate ModRM/SIB/displacement encoding.

        // Opcode with no operands.
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            m_buffer.putByteUnchecked(value: opcode);
        }

        // Short form: register encoded in the low 3 bits of the opcode.
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            emitRexIfNeeded(r: 0, x: 0, b: reg);
            m_buffer.putByteUnchecked(value: opcode + (reg & 7));
        }

        // Register-form ModRM (reg may also be a GroupOpcodeID).
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            emitRexIfNeeded(r: reg, x: 0, b: rm);
            m_buffer.putByteUnchecked(value: opcode);
            registerModRM(reg, rm);
        }

        // Memory operand: base register plus displacement.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            emitRexIfNeeded(r: reg, x: 0, b: base);
            m_buffer.putByteUnchecked(value: opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always emit a 32-bit displacement (patchable).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            emitRexIfNeeded(r: reg, x: 0, b: base);
            m_buffer.putByteUnchecked(value: opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // As above, but always emit an 8-bit displacement.
        void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            emitRexIfNeeded(r: reg, x: 0, b: base);
            m_buffer.putByteUnchecked(value: opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        // Memory operand with SIB: base + index * scale + offset.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(space: maxInstructionSize);
            emitRexIfNeeded(r: reg, x: index, b: base);
            m_buffer.putByteUnchecked(value: opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
2191
2192#if !CPU(X86_64)
2193 void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
2194 {
2195 m_buffer.ensureSpace(maxInstructionSize);
2196 m_buffer.putByteUnchecked(opcode);
2197 memoryModRM(reg, address);
2198 }
2199#endif
2200
2201 void twoByteOp(TwoByteOpcodeID opcode)
2202 {
2203 m_buffer.ensureSpace(space: maxInstructionSize);
2204 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2205 m_buffer.putByteUnchecked(value: opcode);
2206 }
2207
2208 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
2209 {
2210 m_buffer.ensureSpace(space: maxInstructionSize);
2211 emitRexIfNeeded(r: reg, x: 0, b: rm);
2212 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2213 m_buffer.putByteUnchecked(value: opcode);
2214 registerModRM(reg, rm);
2215 }
2216
2217 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
2218 {
2219 m_buffer.ensureSpace(space: maxInstructionSize);
2220 emitRexIfNeeded(r: reg, x: 0, b: base);
2221 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2222 m_buffer.putByteUnchecked(value: opcode);
2223 memoryModRM(reg, base, offset);
2224 }
2225
2226 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2227 {
2228 m_buffer.ensureSpace(space: maxInstructionSize);
2229 emitRexIfNeeded(r: reg, x: index, b: base);
2230 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2231 m_buffer.putByteUnchecked(value: opcode);
2232 memoryModRM(reg, base, index, scale, offset);
2233 }
2234
2235#if !CPU(X86_64)
2236 void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
2237 {
2238 m_buffer.ensureSpace(maxInstructionSize);
2239 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2240 m_buffer.putByteUnchecked(opcode);
2241 memoryModRM(reg, address);
2242 }
2243#endif
2244
2245#if CPU(X86_64)
2246 // Quad-word-sized operands:
2247 //
2248 // Used to format 64-bit operantions, planting a REX.w prefix.
2249 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
2250 // the normal (non-'64'-postfixed) formatters should be used.
2251
2252 void oneByteOp64(OneByteOpcodeID opcode)
2253 {
2254 m_buffer.ensureSpace(space: maxInstructionSize);
2255 emitRexW(r: 0, x: 0, b: 0);
2256 m_buffer.putByteUnchecked(value: opcode);
2257 }
2258
2259 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
2260 {
2261 m_buffer.ensureSpace(space: maxInstructionSize);
2262 emitRexW(r: 0, x: 0, b: reg);
2263 m_buffer.putByteUnchecked(value: opcode + (reg & 7));
2264 }
2265
2266 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
2267 {
2268 m_buffer.ensureSpace(space: maxInstructionSize);
2269 emitRexW(r: reg, x: 0, b: rm);
2270 m_buffer.putByteUnchecked(value: opcode);
2271 registerModRM(reg, rm);
2272 }
2273
2274 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2275 {
2276 m_buffer.ensureSpace(space: maxInstructionSize);
2277 emitRexW(r: reg, x: 0, b: base);
2278 m_buffer.putByteUnchecked(value: opcode);
2279 memoryModRM(reg, base, offset);
2280 }
2281
2282 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2283 {
2284 m_buffer.ensureSpace(space: maxInstructionSize);
2285 emitRexW(r: reg, x: 0, b: base);
2286 m_buffer.putByteUnchecked(value: opcode);
2287 memoryModRM_disp32(reg, base, offset);
2288 }
2289
2290 void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2291 {
2292 m_buffer.ensureSpace(space: maxInstructionSize);
2293 emitRexW(r: reg, x: 0, b: base);
2294 m_buffer.putByteUnchecked(value: opcode);
2295 memoryModRM_disp8(reg, base, offset);
2296 }
2297
2298 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2299 {
2300 m_buffer.ensureSpace(space: maxInstructionSize);
2301 emitRexW(r: reg, x: index, b: base);
2302 m_buffer.putByteUnchecked(value: opcode);
2303 memoryModRM(reg, base, index, scale, offset);
2304 }
2305
2306 void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
2307 {
2308 m_buffer.ensureSpace(space: maxInstructionSize);
2309 emitRexW(r: reg, x: 0, b: rm);
2310 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2311 m_buffer.putByteUnchecked(value: opcode);
2312 registerModRM(reg, rm);
2313 }
2314#endif
2315
2316 // Byte-operands:
2317 //
2318 // These methods format byte operations. Byte operations differ from the normal
2319 // formatters in the circumstances under which they will decide to emit REX prefixes.
2320 // These should be used where any register operand signifies a byte register.
2321 //
2322 // The disctinction is due to the handling of register numbers in the range 4..7 on
2323 // x86-64. These register numbers may either represent the second byte of the first
2324 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
2325 //
2326 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
2327 // be accessed where a REX prefix is present), these are likely best treated as
2328 // deprecated. In order to ensure the correct registers spl..dil are selected a
2329 // REX prefix will be emitted for any byte register operand in the range 4..15.
2330 //
2331 // These formatters may be used in instructions where a mix of operand sizes, in which
2332 // case an unnecessary REX will be emitted, for example:
2333 // movzbl %al, %edi
2334 // In this case a REX will be planted since edi is 7 (and were this a byte operand
2335 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
2336 // be silently ignored by the processor.
2337 //
2338 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
2339 // is provided to check byte register operands.
2340
2341 void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
2342 {
2343 m_buffer.ensureSpace(space: maxInstructionSize);
2344 emitRexIf(condition: byteRegRequiresRex(reg: rm), r: 0, x: 0, b: rm);
2345 m_buffer.putByteUnchecked(value: opcode);
2346 registerModRM(reg: groupOp, rm);
2347 }
2348
2349 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
2350 {
2351 m_buffer.ensureSpace(space: maxInstructionSize);
2352 emitRexIf(condition: byteRegRequiresRex(reg) || byteRegRequiresRex(reg: rm), r: reg, x: 0, b: rm);
2353 m_buffer.putByteUnchecked(value: opcode);
2354 registerModRM(reg, rm);
2355 }
2356
2357 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2358 {
2359 m_buffer.ensureSpace(space: maxInstructionSize);
2360 emitRexIf(condition: byteRegRequiresRex(reg) || regRequiresRex(reg: index) || regRequiresRex(reg: base), r: reg, x: index, b: base);
2361 m_buffer.putByteUnchecked(value: opcode);
2362 memoryModRM(reg, base, index, scale, offset);
2363 }
2364
2365 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
2366 {
2367 m_buffer.ensureSpace(space: maxInstructionSize);
2368 emitRexIf(condition: byteRegRequiresRex(reg) || byteRegRequiresRex(reg: rm), r: reg, x: 0, b: rm);
2369 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2370 m_buffer.putByteUnchecked(value: opcode);
2371 registerModRM(reg, rm);
2372 }
2373
2374 void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
2375 {
2376 m_buffer.ensureSpace(space: maxInstructionSize);
2377 emitRexIf(condition: byteRegRequiresRex(reg: rm), r: 0, x: 0, b: rm);
2378 m_buffer.putByteUnchecked(value: OP_2BYTE_ESCAPE);
2379 m_buffer.putByteUnchecked(value: opcode);
2380 registerModRM(reg: groupOp, rm);
2381 }
2382
2383 // Immediates:
2384 //
2385 // An immedaite should be appended where appropriate after an op has been emitted.
2386 // The writes are unchecked since the opcode formatters above will have ensured space.
2387
2388 void immediate8(int imm)
2389 {
2390 m_buffer.putByteUnchecked(value: imm);
2391 }
2392
2393 void immediate16(int imm)
2394 {
2395 m_buffer.putShortUnchecked(value: imm);
2396 }
2397
2398 void immediate32(int imm)
2399 {
2400 m_buffer.putIntUnchecked(value: imm);
2401 }
2402
2403 void immediate64(int64_t imm)
2404 {
2405 m_buffer.putInt64Unchecked(value: imm);
2406 }
2407
2408 AssemblerLabel immediateRel32()
2409 {
2410 m_buffer.putIntUnchecked(value: 0);
2411 return label();
2412 }
2413
2414 // Administrative methods:
2415
2416 size_t codeSize() const { return m_buffer.codeSize(); }
2417 AssemblerLabel label() const { return m_buffer.label(); }
2418 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2419 void* data() const { return m_buffer.data(); }
2420
2421 PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
2422 {
2423 return m_buffer.executableCopy(globalData, ownerUID, effort);
2424 }
2425
2426 unsigned debugOffset() { return m_buffer.debugOffset(); }
2427
2428 private:
2429
2430 // Internals; ModRm and REX formatters.
2431
2432 static const RegisterID noBase = X86Registers::ebp;
2433 static const RegisterID hasSib = X86Registers::esp;
2434 static const RegisterID noIndex = X86Registers::esp;
2435#if CPU(X86_64)
2436 static const RegisterID noBase2 = X86Registers::r13;
2437 static const RegisterID hasSib2 = X86Registers::r12;
2438
2439 // Registers r8 & above require a REX prefixe.
2440 inline bool regRequiresRex(int reg)
2441 {
2442 return (reg >= X86Registers::r8);
2443 }
2444
2445 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
2446 inline bool byteRegRequiresRex(int reg)
2447 {
2448 return (reg >= X86Registers::esp);
2449 }
2450
2451 // Format a REX prefix byte.
2452 inline void emitRex(bool w, int r, int x, int b)
2453 {
2454 ASSERT(r >= 0);
2455 ASSERT(x >= 0);
2456 ASSERT(b >= 0);
2457 m_buffer.putByteUnchecked(value: PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
2458 }
2459
2460 // Used to plant a REX byte with REX.w set (for 64-bit operations).
2461 inline void emitRexW(int r, int x, int b)
2462 {
2463 emitRex(w: true, r, x, b);
2464 }
2465
2466 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
2467 // regRequiresRex() to check other registers (i.e. address base & index).
2468 inline void emitRexIf(bool condition, int r, int x, int b)
2469 {
2470 if (condition) emitRex(w: false, r, x, b);
2471 }
2472
2473 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
2474 inline void emitRexIfNeeded(int r, int x, int b)
2475 {
2476 emitRexIf(condition: regRequiresRex(reg: r) || regRequiresRex(reg: x) || regRequiresRex(reg: b), r, x, b);
2477 }
2478#else
2479 // No REX prefix bytes on 32-bit x86.
2480 inline bool regRequiresRex(int) { return false; }
2481 inline bool byteRegRequiresRex(int) { return false; }
2482 inline void emitRexIf(bool, int, int, int) {}
2483 inline void emitRexIfNeeded(int, int, int) {}
2484#endif
2485
2486 void putModRm(ModRmMode mode, int reg, RegisterID rm)
2487 {
2488 m_buffer.putByteUnchecked(value: (mode << 6) | ((reg & 7) << 3) | (rm & 7));
2489 }
2490
2491 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
2492 {
2493 ASSERT(mode != ModRmRegister);
2494
2495 putModRm(mode, reg, rm: hasSib);
2496 m_buffer.putByteUnchecked(value: (scale << 6) | ((index & 7) << 3) | (base & 7));
2497 }
2498
2499 void registerModRM(int reg, RegisterID rm)
2500 {
2501 putModRm(mode: ModRmRegister, reg, rm);
2502 }
2503
2504 void memoryModRM(int reg, RegisterID base, int offset)
2505 {
2506 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2507#if CPU(X86_64)
2508 if ((base == hasSib) || (base == hasSib2)) {
2509#else
2510 if (base == hasSib) {
2511#endif
2512 if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
2513 putModRmSib(mode: ModRmMemoryNoDisp, reg, base, index: noIndex, scale: 0);
2514 else if (CAN_SIGN_EXTEND_8_32(value: offset)) {
2515 putModRmSib(mode: ModRmMemoryDisp8, reg, base, index: noIndex, scale: 0);
2516 m_buffer.putByteUnchecked(value: offset);
2517 } else {
2518 putModRmSib(mode: ModRmMemoryDisp32, reg, base, index: noIndex, scale: 0);
2519 m_buffer.putIntUnchecked(value: offset);
2520 }
2521 } else {
2522#if CPU(X86_64)
2523 if (!offset && (base != noBase) && (base != noBase2))
2524#else
2525 if (!offset && (base != noBase))
2526#endif
2527 putModRm(mode: ModRmMemoryNoDisp, reg, rm: base);
2528 else if (CAN_SIGN_EXTEND_8_32(value: offset)) {
2529 putModRm(mode: ModRmMemoryDisp8, reg, rm: base);
2530 m_buffer.putByteUnchecked(value: offset);
2531 } else {
2532 putModRm(mode: ModRmMemoryDisp32, reg, rm: base);
2533 m_buffer.putIntUnchecked(value: offset);
2534 }
2535 }
2536 }
2537
2538 void memoryModRM_disp8(int reg, RegisterID base, int offset)
2539 {
2540 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2541 ASSERT(CAN_SIGN_EXTEND_8_32(offset));
2542#if CPU(X86_64)
2543 if ((base == hasSib) || (base == hasSib2)) {
2544#else
2545 if (base == hasSib) {
2546#endif
2547 putModRmSib(mode: ModRmMemoryDisp8, reg, base, index: noIndex, scale: 0);
2548 m_buffer.putByteUnchecked(value: offset);
2549 } else {
2550 putModRm(mode: ModRmMemoryDisp8, reg, rm: base);
2551 m_buffer.putByteUnchecked(value: offset);
2552 }
2553 }
2554
2555 void memoryModRM_disp32(int reg, RegisterID base, int offset)
2556 {
2557 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2558#if CPU(X86_64)
2559 if ((base == hasSib) || (base == hasSib2)) {
2560#else
2561 if (base == hasSib) {
2562#endif
2563 putModRmSib(mode: ModRmMemoryDisp32, reg, base, index: noIndex, scale: 0);
2564 m_buffer.putIntUnchecked(value: offset);
2565 } else {
2566 putModRm(mode: ModRmMemoryDisp32, reg, rm: base);
2567 m_buffer.putIntUnchecked(value: offset);
2568 }
2569 }
2570
2571 void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
2572 {
2573 ASSERT(index != noIndex);
2574
2575#if CPU(X86_64)
2576 if (!offset && (base != noBase) && (base != noBase2))
2577#else
2578 if (!offset && (base != noBase))
2579#endif
2580 putModRmSib(mode: ModRmMemoryNoDisp, reg, base, index, scale);
2581 else if (CAN_SIGN_EXTEND_8_32(value: offset)) {
2582 putModRmSib(mode: ModRmMemoryDisp8, reg, base, index, scale);
2583 m_buffer.putByteUnchecked(value: offset);
2584 } else {
2585 putModRmSib(mode: ModRmMemoryDisp32, reg, base, index, scale);
2586 m_buffer.putIntUnchecked(value: offset);
2587 }
2588 }
2589
2590#if !CPU(X86_64)
2591 void memoryModRM(int reg, const void* address)
2592 {
2593 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
2594 putModRm(ModRmMemoryNoDisp, reg, noBase);
2595 m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
2596 }
2597#endif
2598
2599 AssemblerBuffer m_buffer;
2600 } m_formatter;
2601 int m_indexOfLastWatchpoint;
2602 int m_indexOfTailOfLastWatchpoint;
2603};
2604
2605} // namespace JSC
2606
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2608
2609#endif // X86Assembler_h
2610

source code of qtdeclarative/src/3rdparty/masm/assembler/X86Assembler.h