| 1 | /* | 
| 2 |  * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. | 
| 3 |  * | 
| 4 |  * Redistribution and use in source and binary forms, with or without | 
| 5 |  * modification, are permitted provided that the following conditions | 
| 6 |  * are met: | 
| 7 |  * 1. Redistributions of source code must retain the above copyright | 
| 8 |  *    notice, this list of conditions and the following disclaimer. | 
| 9 |  * 2. Redistributions in binary form must reproduce the above copyright | 
| 10 |  *    notice, this list of conditions and the following disclaimer in the | 
| 11 |  *    documentation and/or other materials provided with the distribution. | 
| 12 |  * | 
| 13 |  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | 
| 14 |  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 
| 15 |  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 
| 16 |  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR | 
| 17 |  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | 
| 18 |  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | 
| 19 |  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | 
| 20 |  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | 
| 21 |  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 
| 22 |  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 
| 23 |  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.  | 
| 24 |  */ | 
| 25 |  | 
| 26 | #ifndef X86Assembler_h | 
| 27 | #define X86Assembler_h | 
| 28 |  | 
| 29 | #include <Platform.h> | 
| 30 |  | 
| 31 | #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) | 
| 32 |  | 
| 33 | #include "AssemblerBuffer.h" | 
| 34 | #include "AbstractMacroAssembler.h" | 
| 35 | #include "JITCompilationEffort.h" | 
| 36 | #include <stdint.h> | 
| 37 | #include <wtf/Assertions.h> | 
| 38 | #include <wtf/Vector.h> | 
| 39 |  | 
| 40 | namespace JSC { | 
| 41 |  | 
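// Returns true if 'value' fits in a sign-extended 8-bit immediate (-128..127). Used below to
// choose the shorter byte-immediate (Ib) encodings over the 32-bit (Iz) forms.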
| 42 | inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; } | 
| 43 |  | 
| 44 | namespace X86Registers { | 
| 45 |     typedef enum { | 
| 46 |         eax, | 
| 47 |         ecx, | 
| 48 |         edx, | 
| 49 |         ebx, | 
| 50 |         esp, | 
| 51 |         ebp, | 
| 52 |         esi, | 
| 53 |         edi, | 
| 54 |  | 
| 55 | #if CPU(X86_64) | 
| 56 |         r8, | 
| 57 |         r9, | 
| 58 |         r10, | 
| 59 |         r11, | 
| 60 |         r12, | 
| 61 |         r13, | 
| 62 |         r14, | 
| 63 |         r15, | 
| 64 | #endif | 
| 65 |     } RegisterID; | 
| 66 |  | 
| 67 |     typedef enum { | 
| 68 |         xmm0, | 
| 69 |         xmm1, | 
| 70 |         xmm2, | 
| 71 |         xmm3, | 
| 72 |         xmm4, | 
| 73 |         xmm5, | 
| 74 |         xmm6, | 
| 75 |         xmm7, | 
| 76 |     } XMMRegisterID; | 
| 77 | } | 
| 78 |  | 
| 79 | class X86Assembler { | 
| 80 | public: | 
| 81 |     typedef X86Registers::RegisterID RegisterID; | 
| 82 |     typedef X86Registers::XMMRegisterID XMMRegisterID; | 
| 83 |     typedef XMMRegisterID FPRegisterID; | 
| 84 |  | 
| 85 |     typedef enum { | 
| 86 |         ConditionO, | 
| 87 |         ConditionNO, | 
| 88 |         ConditionB, | 
| 89 |         ConditionAE, | 
| 90 |         ConditionE, | 
| 91 |         ConditionNE, | 
| 92 |         ConditionBE, | 
| 93 |         ConditionA, | 
| 94 |         ConditionS, | 
| 95 |         ConditionNS, | 
| 96 |         ConditionP, | 
| 97 |         ConditionNP, | 
| 98 |         ConditionL, | 
| 99 |         ConditionGE, | 
| 100 |         ConditionLE, | 
| 101 |         ConditionG, | 
| 102 |  | 
| 103 |         ConditionC  = ConditionB, | 
| 104 |         ConditionNC = ConditionAE, | 
| 105 |     } Condition; | 
| 106 |  | 
| 107 | private: | 
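    // Opcode names follow the operand notation of the Intel instruction set reference:
    // Ev/Gv are word/dword (or qword with REX.W) r/m and register operands, Eb/Gb the byte
    // forms, and Ib/Iz byte and dword immediates.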
| 108 |     typedef enum { | 
| 109 |         OP_ADD_EvGv                     = 0x01, | 
| 110 |         OP_ADD_GvEv                     = 0x03, | 
| 111 |         OP_OR_EvGv                      = 0x09, | 
| 112 |         OP_OR_GvEv                      = 0x0B, | 
| 113 |         OP_2BYTE_ESCAPE                 = 0x0F, | 
| 114 |         OP_AND_EvGv                     = 0x21, | 
| 115 |         OP_AND_GvEv                     = 0x23, | 
| 116 |         OP_SUB_EvGv                     = 0x29, | 
| 117 |         OP_SUB_GvEv                     = 0x2B, | 
| 118 |         PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E, | 
| 119 |         OP_XOR_EvGv                     = 0x31, | 
| 120 |         OP_XOR_GvEv                     = 0x33, | 
| 121 |         OP_CMP_EvGv                     = 0x39, | 
| 122 |         OP_CMP_GvEv                     = 0x3B, | 
| 123 | #if CPU(X86_64) | 
| 124 |         PRE_REX                         = 0x40, | 
| 125 | #endif | 
| 126 |         OP_PUSH_EAX                     = 0x50, | 
| 127 |         OP_POP_EAX                      = 0x58, | 
| 128 | #if CPU(X86_64) | 
| 129 |         OP_MOVSXD_GvEv                  = 0x63, | 
| 130 | #endif | 
| 131 |         PRE_OPERAND_SIZE                = 0x66, | 
| 132 |         PRE_SSE_66                      = 0x66, | 
| 133 |         OP_PUSH_Iz                      = 0x68, | 
| 134 |         OP_IMUL_GvEvIz                  = 0x69, | 
| 135 |         OP_GROUP1_EbIb                  = 0x80, | 
| 136 |         OP_GROUP1_EvIz                  = 0x81, | 
| 137 |         OP_GROUP1_EvIb                  = 0x83, | 
| 138 |         OP_TEST_EbGb                    = 0x84, | 
| 139 |         OP_TEST_EvGv                    = 0x85, | 
| 140 |         OP_XCHG_EvGv                    = 0x87, | 
| 141 |         OP_MOV_EbGb                     = 0x88, | 
| 142 |         OP_MOV_EvGv                     = 0x89, | 
| 143 |         OP_MOV_GvEv                     = 0x8B, | 
| 144 |         OP_LEA                          = 0x8D, | 
| 145 |         OP_GROUP1A_Ev                   = 0x8F, | 
| 146 |         OP_NOP                          = 0x90, | 
| 147 |         OP_CDQ                          = 0x99, | 
| 148 |         OP_MOV_EAXOv                    = 0xA1, | 
| 149 |         OP_MOV_OvEAX                    = 0xA3, | 
| 150 |         OP_MOV_EAXIv                    = 0xB8, | 
| 151 |         OP_GROUP2_EvIb                  = 0xC1, | 
| 152 |         OP_RET                          = 0xC3, | 
| 153 |         OP_GROUP11_EvIb                 = 0xC6, | 
| 154 |         OP_GROUP11_EvIz                 = 0xC7, | 
| 155 |         OP_INT3                         = 0xCC, | 
| 156 |         OP_GROUP2_Ev1                   = 0xD1, | 
| 157 |         OP_GROUP2_EvCL                  = 0xD3, | 
| 158 |         OP_ESCAPE_DD                    = 0xDD, | 
| 159 |         OP_CALL_rel32                   = 0xE8, | 
| 160 |         OP_JMP_rel32                    = 0xE9, | 
| 161 |         PRE_SSE_F2                      = 0xF2, | 
| 162 |         PRE_SSE_F3                      = 0xF3, | 
| 163 |         OP_HLT                          = 0xF4, | 
| 164 |         OP_GROUP3_EbIb                  = 0xF6, | 
| 165 |         OP_GROUP3_Ev                    = 0xF7, | 
        OP_GROUP3_EvIz                  = 0xF7, // Same opcode as OP_GROUP3_Ev; the TEST form takes an immediate.
| 167 |         OP_GROUP5_Ev                    = 0xFF, | 
| 168 |     } OneByteOpcodeID; | 
| 169 |  | 
| 170 |     typedef enum { | 
| 171 |         OP2_MOVSD_VsdWsd    = 0x10, | 
| 172 |         OP2_MOVSD_WsdVsd    = 0x11, | 
| 173 |         OP2_MOVSS_VsdWsd    = 0x10, | 
| 174 |         OP2_MOVSS_WsdVsd    = 0x11, | 
| 175 |         OP2_CVTSI2SD_VsdEd  = 0x2A, | 
| 176 |         OP2_CVTTSD2SI_GdWsd = 0x2C, | 
| 177 |         OP2_UCOMISD_VsdWsd  = 0x2E, | 
| 178 |         OP2_ADDSD_VsdWsd    = 0x58, | 
| 179 |         OP2_MULSD_VsdWsd    = 0x59, | 
| 180 |         OP2_CVTSD2SS_VsdWsd = 0x5A, | 
| 181 |         OP2_CVTSS2SD_VsdWsd = 0x5A, | 
| 182 |         OP2_SUBSD_VsdWsd    = 0x5C, | 
| 183 |         OP2_DIVSD_VsdWsd    = 0x5E, | 
| 184 |         OP2_SQRTSD_VsdWsd   = 0x51, | 
| 185 |         OP2_ANDNPD_VpdWpd   = 0x55, | 
| 186 |         OP2_XORPD_VpdWpd    = 0x57, | 
| 187 |         OP2_MOVD_VdEd       = 0x6E, | 
| 188 |         OP2_MOVD_EdVd       = 0x7E, | 
| 189 |         OP2_JCC_rel32       = 0x80, | 
| 190 |         OP_SETCC            = 0x90, | 
| 191 |         OP2_IMUL_GvEv       = 0xAF, | 
| 192 |         OP2_MOVZX_GvEb      = 0xB6, | 
| 193 |         OP2_MOVSX_GvEb      = 0xBE, | 
| 194 |         OP2_MOVZX_GvEw      = 0xB7, | 
| 195 |         OP2_MOVSX_GvEw      = 0xBF, | 
| 196 |         OP2_PEXTRW_GdUdIb   = 0xC5, | 
| 197 |         OP2_PSLLQ_UdqIb     = 0x73, | 
| 198 |         OP2_PSRLQ_UdqIb     = 0x73, | 
        OP2_POR_VdqWdq      = 0xEB,
| 200 |     } TwoByteOpcodeID; | 
| 201 |  | 
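    // Jcc rel32 (0x0F 0x80+cc) and SETcc (0x0F 0x90+cc) encode the condition in the low nibble
    // of the second opcode byte, so a Condition value can simply be added to the base opcode.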
| 202 |     TwoByteOpcodeID jccRel32(Condition cond) | 
| 203 |     { | 
| 204 |         return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond); | 
| 205 |     } | 
| 206 |  | 
| 207 |     TwoByteOpcodeID setccOpcode(Condition cond) | 
| 208 |     { | 
| 209 |         return (TwoByteOpcodeID)(OP_SETCC + cond); | 
| 210 |     } | 
| 211 |  | 
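    // Group opcodes share a single opcode byte between several instructions; the GroupOpcodeID
    // is the /digit placed in the reg field of the ModRM byte to select the operation.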
| 212 |     typedef enum { | 
| 213 |         GROUP1_OP_ADD = 0, | 
| 214 |         GROUP1_OP_OR  = 1, | 
| 215 |         GROUP1_OP_ADC = 2, | 
| 216 |         GROUP1_OP_AND = 4, | 
| 217 |         GROUP1_OP_SUB = 5, | 
| 218 |         GROUP1_OP_XOR = 6, | 
| 219 |         GROUP1_OP_CMP = 7, | 
| 220 |  | 
| 221 |         GROUP1A_OP_POP = 0, | 
| 222 |          | 
| 223 |         GROUP2_OP_ROL = 0, | 
| 224 |         GROUP2_OP_ROR = 1, | 
| 225 |         GROUP2_OP_RCL = 2, | 
| 226 |         GROUP2_OP_RCR = 3, | 
| 227 |          | 
| 228 |         GROUP2_OP_SHL = 4, | 
| 229 |         GROUP2_OP_SHR = 5, | 
| 230 |         GROUP2_OP_SAR = 7, | 
| 231 |  | 
| 232 |         GROUP3_OP_TEST = 0, | 
| 233 |         GROUP3_OP_NOT  = 2, | 
| 234 |         GROUP3_OP_NEG  = 3, | 
| 235 |         GROUP3_OP_IDIV = 7, | 
| 236 |  | 
| 237 |         GROUP5_OP_CALLN = 2, | 
| 238 |         GROUP5_OP_JMPN  = 4, | 
| 239 |         GROUP5_OP_PUSH  = 6, | 
| 240 |  | 
| 241 |         GROUP11_MOV = 0, | 
| 242 |  | 
| 243 |         GROUP14_OP_PSLLQ = 6, | 
| 244 |         GROUP14_OP_PSRLQ = 2, | 
| 245 |  | 
| 246 |         ESCAPE_DD_FSTP_doubleReal = 3, | 
| 247 |     } GroupOpcodeID; | 
| 248 |      | 
| 249 |     class X86InstructionFormatter; | 
| 250 | public: | 
| 251 |  | 
| 252 |     X86Assembler() | 
| 253 |         : m_indexOfLastWatchpoint(INT_MIN) | 
| 254 |         , m_indexOfTailOfLastWatchpoint(INT_MIN) | 
| 255 |     { | 
| 256 |     } | 
| 257 |  | 
| 258 |     // Stack operations: | 
| 259 |  | 
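    // push_r/pop_r use the short 0x50+r / 0x58+r forms, folding the register number into the
    // opcode byte.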
| 260 |     void push_r(RegisterID reg) | 
| 261 |     { | 
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
| 263 |     } | 
| 264 |  | 
| 265 |     void pop_r(RegisterID reg) | 
| 266 |     { | 
        m_formatter.oneByteOp(OP_POP_EAX, reg);
| 268 |     } | 
| 269 |  | 
| 270 |     void push_i32(int imm) | 
| 271 |     { | 
        m_formatter.oneByteOp(OP_PUSH_Iz);
| 273 |         m_formatter.immediate32(imm); | 
| 274 |     } | 
| 275 |  | 
| 276 |     void push_m(int offset, RegisterID base) | 
| 277 |     { | 
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
| 279 |     } | 
| 280 |  | 
| 281 |     void pop_m(int offset, RegisterID base) | 
| 282 |     { | 
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
| 284 |     } | 
| 285 |  | 
| 286 |     // Arithmetic operations: | 
| 287 |  | 
| 288 | #if !CPU(X86_64) | 
| 289 |     void adcl_im(int imm, const void* addr) | 
| 290 |     { | 
| 291 |         if (CAN_SIGN_EXTEND_8_32(imm)) { | 
| 292 |             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr); | 
| 293 |             m_formatter.immediate8(imm); | 
| 294 |         } else { | 
| 295 |             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr); | 
| 296 |             m_formatter.immediate32(imm); | 
| 297 |         } | 
| 298 |     } | 
| 299 | #endif | 
| 300 |  | 
| 301 |     void addl_rr(RegisterID src, RegisterID dst) | 
| 302 |     { | 
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
| 304 |     } | 
| 305 |  | 
| 306 |     void addl_mr(int offset, RegisterID base, RegisterID dst) | 
| 307 |     { | 
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
| 309 |     } | 
| 310 |      | 
| 311 | #if !CPU(X86_64) | 
| 312 |     void addl_mr(const void* addr, RegisterID dst) | 
| 313 |     { | 
| 314 |         m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr); | 
| 315 |     } | 
| 316 | #endif | 
| 317 |  | 
| 318 |     void addl_rm(RegisterID src, int offset, RegisterID base) | 
| 319 |     { | 
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
| 321 |     } | 
| 322 |  | 
| 323 |     void addl_ir(int imm, RegisterID dst) | 
| 324 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
| 330 |             m_formatter.immediate32(imm); | 
| 331 |         } | 
| 332 |     } | 
| 333 |  | 
| 334 |     void addl_im(int imm, int offset, RegisterID base) | 
| 335 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
| 341 |             m_formatter.immediate32(imm); | 
| 342 |         } | 
| 343 |     } | 
| 344 |  | 
| 345 | #if CPU(X86_64) | 
| 346 |     void addq_rr(RegisterID src, RegisterID dst) | 
| 347 |     { | 
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
| 349 |     } | 
| 350 |  | 
| 351 |     void addq_mr(int offset, RegisterID base, RegisterID dst) | 
| 352 |     { | 
        m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
| 354 |     } | 
| 355 |  | 
| 356 |     void addq_ir(int imm, RegisterID dst) | 
| 357 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
| 363 |             m_formatter.immediate32(imm); | 
| 364 |         } | 
| 365 |     } | 
| 366 |  | 
| 367 |     void addq_im(int imm, int offset, RegisterID base) | 
| 368 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
| 374 |             m_formatter.immediate32(imm); | 
| 375 |         } | 
| 376 |     } | 
| 377 | #else | 
| 378 |     void addl_im(int imm, const void* addr) | 
| 379 |     { | 
| 380 |         if (CAN_SIGN_EXTEND_8_32(imm)) { | 
| 381 |             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr); | 
| 382 |             m_formatter.immediate8(imm); | 
| 383 |         } else { | 
| 384 |             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr); | 
| 385 |             m_formatter.immediate32(imm); | 
| 386 |         } | 
| 387 |     } | 
| 388 | #endif | 
| 389 |  | 
| 390 |     void andl_rr(RegisterID src, RegisterID dst) | 
| 391 |     { | 
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
| 393 |     } | 
| 394 |  | 
| 395 |     void andl_mr(int offset, RegisterID base, RegisterID dst) | 
| 396 |     { | 
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
| 398 |     } | 
| 399 |  | 
| 400 |     void andl_rm(RegisterID src, int offset, RegisterID base) | 
| 401 |     { | 
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
| 403 |     } | 
| 404 |  | 
| 405 |     void andl_ir(int imm, RegisterID dst) | 
| 406 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
| 412 |             m_formatter.immediate32(imm); | 
| 413 |         } | 
| 414 |     } | 
| 415 |  | 
| 416 |     void andl_im(int imm, int offset, RegisterID base) | 
| 417 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
| 423 |             m_formatter.immediate32(imm); | 
| 424 |         } | 
| 425 |     } | 
| 426 |  | 
| 427 | #if CPU(X86_64) | 
| 428 |     void andq_rr(RegisterID src, RegisterID dst) | 
| 429 |     { | 
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
| 431 |     } | 
| 432 |  | 
| 433 |     void andq_ir(int imm, RegisterID dst) | 
| 434 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
| 440 |             m_formatter.immediate32(imm); | 
| 441 |         } | 
| 442 |     } | 
| 443 | #else | 
| 444 |     void andl_im(int imm, const void* addr) | 
| 445 |     { | 
| 446 |         if (CAN_SIGN_EXTEND_8_32(imm)) { | 
| 447 |             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr); | 
| 448 |             m_formatter.immediate8(imm); | 
| 449 |         } else { | 
| 450 |             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr); | 
| 451 |             m_formatter.immediate32(imm); | 
| 452 |         } | 
| 453 |     } | 
| 454 | #endif | 
| 455 |  | 
| 456 |     void negl_r(RegisterID dst) | 
| 457 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
| 459 |     } | 
| 460 |  | 
| 461 | #if CPU(X86_64) | 
| 462 |     void negq_r(RegisterID dst) | 
| 463 |     { | 
        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
| 465 |     } | 
| 466 | #endif | 
| 467 |  | 
| 468 |     void negl_m(int offset, RegisterID base) | 
| 469 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
| 471 |     } | 
| 472 |  | 
| 473 |     void notl_r(RegisterID dst) | 
| 474 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
| 476 |     } | 
| 477 |  | 
| 478 |     void notl_m(int offset, RegisterID base) | 
| 479 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
| 481 |     } | 
| 482 |  | 
| 483 |     void orl_rr(RegisterID src, RegisterID dst) | 
| 484 |     { | 
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
| 486 |     } | 
| 487 |  | 
| 488 |     void orl_mr(int offset, RegisterID base, RegisterID dst) | 
| 489 |     { | 
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
| 491 |     } | 
| 492 |  | 
| 493 |     void orl_rm(RegisterID src, int offset, RegisterID base) | 
| 494 |     { | 
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
| 496 |     } | 
| 497 |  | 
| 498 |     void orl_ir(int imm, RegisterID dst) | 
| 499 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
| 505 |             m_formatter.immediate32(imm); | 
| 506 |         } | 
| 507 |     } | 
| 508 |  | 
| 509 |     void orl_im(int imm, int offset, RegisterID base) | 
| 510 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
| 516 |             m_formatter.immediate32(imm); | 
| 517 |         } | 
| 518 |     } | 
| 519 |  | 
| 520 | #if CPU(X86_64) | 
| 521 |     void orq_rr(RegisterID src, RegisterID dst) | 
| 522 |     { | 
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
| 524 |     } | 
| 525 |  | 
| 526 |     void orq_ir(int imm, RegisterID dst) | 
| 527 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
| 533 |             m_formatter.immediate32(imm); | 
| 534 |         } | 
| 535 |     } | 
| 536 | #else | 
| 537 |     void orl_im(int imm, const void* addr) | 
| 538 |     { | 
| 539 |         if (CAN_SIGN_EXTEND_8_32(imm)) { | 
| 540 |             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr); | 
| 541 |             m_formatter.immediate8(imm); | 
| 542 |         } else { | 
| 543 |             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr); | 
| 544 |             m_formatter.immediate32(imm); | 
| 545 |         } | 
| 546 |     } | 
| 547 |  | 
| 548 |     void orl_rm(RegisterID src, const void* addr) | 
| 549 |     { | 
| 550 |         m_formatter.oneByteOp(OP_OR_EvGv, src, addr); | 
| 551 |     } | 
| 552 | #endif | 
| 553 |  | 
| 554 |     void subl_rr(RegisterID src, RegisterID dst) | 
| 555 |     { | 
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
| 557 |     } | 
| 558 |  | 
| 559 |     void subl_mr(int offset, RegisterID base, RegisterID dst) | 
| 560 |     { | 
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
| 562 |     } | 
| 563 |  | 
| 564 |     void subl_rm(RegisterID src, int offset, RegisterID base) | 
| 565 |     { | 
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
| 567 |     } | 
| 568 |  | 
| 569 |     void subl_ir(int imm, RegisterID dst) | 
| 570 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
| 576 |             m_formatter.immediate32(imm); | 
| 577 |         } | 
| 578 |     } | 
| 579 |      | 
| 580 |     void subl_im(int imm, int offset, RegisterID base) | 
| 581 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
| 587 |             m_formatter.immediate32(imm); | 
| 588 |         } | 
| 589 |     } | 
| 590 |  | 
| 591 | #if CPU(X86_64) | 
| 592 |     void subq_rr(RegisterID src, RegisterID dst) | 
| 593 |     { | 
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
| 595 |     } | 
| 596 |  | 
| 597 |     void subq_ir(int imm, RegisterID dst) | 
| 598 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
| 604 |             m_formatter.immediate32(imm); | 
| 605 |         } | 
| 606 |     } | 
| 607 | #else | 
| 608 |     void subl_im(int imm, const void* addr) | 
| 609 |     { | 
| 610 |         if (CAN_SIGN_EXTEND_8_32(imm)) { | 
| 611 |             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr); | 
| 612 |             m_formatter.immediate8(imm); | 
| 613 |         } else { | 
| 614 |             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr); | 
| 615 |             m_formatter.immediate32(imm); | 
| 616 |         } | 
| 617 |     } | 
| 618 | #endif | 
| 619 |  | 
| 620 |     void xorl_rr(RegisterID src, RegisterID dst) | 
| 621 |     { | 
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
| 623 |     } | 
| 624 |  | 
| 625 |     void xorl_mr(int offset, RegisterID base, RegisterID dst) | 
| 626 |     { | 
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
| 628 |     } | 
| 629 |  | 
| 630 |     void xorl_rm(RegisterID src, int offset, RegisterID base) | 
| 631 |     { | 
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
| 633 |     } | 
| 634 |  | 
| 635 |     void xorl_im(int imm, int offset, RegisterID base) | 
| 636 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
| 642 |             m_formatter.immediate32(imm); | 
| 643 |         } | 
| 644 |     } | 
| 645 |  | 
| 646 |     void xorl_ir(int imm, RegisterID dst) | 
| 647 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
| 653 |             m_formatter.immediate32(imm); | 
| 654 |         } | 
| 655 |     } | 
| 656 |  | 
| 657 | #if CPU(X86_64) | 
| 658 |     void xorq_rr(RegisterID src, RegisterID dst) | 
| 659 |     { | 
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
| 661 |     } | 
| 662 |  | 
| 663 |     void xorq_ir(int imm, RegisterID dst) | 
| 664 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
| 670 |             m_formatter.immediate32(imm); | 
| 671 |         } | 
| 672 |     } | 
| 673 |      | 
| 674 |     void xorq_rm(RegisterID src, int offset, RegisterID base) | 
| 675 |     { | 
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
| 677 |     } | 
| 678 |      | 
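    // Shifts and rotates by 1 can use the short 0xD1 form (OP_GROUP2_Ev1); any other count
    // uses the 0xC1 form (OP_GROUP2_EvIb) with the count in the following immediate byte.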
| 679 |     void rorq_i8r(int imm, RegisterID dst) | 
| 680 |     { | 
| 681 |         if (imm == 1) | 
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
| 685 |             m_formatter.immediate8(imm); | 
| 686 |         } | 
| 687 |     } | 
| 688 |  | 
| 689 |     void sarq_CLr(RegisterID dst) | 
| 690 |     { | 
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
| 692 |     } | 
| 693 |  | 
| 694 |     void sarq_i8r(int imm, RegisterID dst) | 
| 695 |     { | 
| 696 |         if (imm == 1) | 
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
| 700 |             m_formatter.immediate8(imm); | 
| 701 |         } | 
| 702 |     } | 
| 703 |  | 
| 704 |     void shrq_i8r(int imm, RegisterID dst) | 
| 705 |     { | 
        // ### The short shift-by-1 form does not work here, so it stays disabled via "0 &&".
        if (0 && imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
| 711 |             m_formatter.immediate8(imm); | 
| 712 |         } | 
| 713 |     } | 
| 714 |  | 
| 715 |     void shrq_CLr(RegisterID dst) | 
| 716 |     { | 
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
| 718 |     } | 
| 719 |  | 
| 720 |     void shlq_i8r(int imm, RegisterID dst) | 
| 721 |     { | 
        // ### The short shift-by-1 form does not work here, so it stays disabled via "0 &&".
        if (0 && imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
| 727 |             m_formatter.immediate8(imm); | 
| 728 |         } | 
| 729 |     } | 
| 730 |  | 
| 731 |     void shlq_CLr(RegisterID dst) | 
| 732 |     { | 
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
| 734 |     } | 
| 735 | #endif | 
| 736 |  | 
| 737 |     void sarl_i8r(int imm, RegisterID dst) | 
| 738 |     { | 
| 739 |         if (imm == 1) | 
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
| 743 |             m_formatter.immediate8(imm); | 
| 744 |         } | 
| 745 |     } | 
| 746 |  | 
| 747 |     void sarl_CLr(RegisterID dst) | 
| 748 |     { | 
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
| 750 |     } | 
| 751 |      | 
| 752 |     void shrl_i8r(int imm, RegisterID dst) | 
| 753 |     { | 
| 754 |         if (imm == 1) | 
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
| 758 |             m_formatter.immediate8(imm); | 
| 759 |         } | 
| 760 |     } | 
| 761 |      | 
| 762 |     void shrl_CLr(RegisterID dst) | 
| 763 |     { | 
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
| 765 |     } | 
| 766 |  | 
| 767 |     void shll_i8r(int imm, RegisterID dst) | 
| 768 |     { | 
| 769 |         if (imm == 1) | 
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
| 773 |             m_formatter.immediate8(imm); | 
| 774 |         } | 
| 775 |     } | 
| 776 |  | 
| 777 |     void shll_CLr(RegisterID dst) | 
| 778 |     { | 
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
| 780 |     } | 
| 781 |  | 
| 782 |     void imull_rr(RegisterID src, RegisterID dst) | 
| 783 |     { | 
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
| 785 |     } | 
| 786 |  | 
| 787 |     void imull_mr(int offset, RegisterID base, RegisterID dst) | 
| 788 |     { | 
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
| 790 |     } | 
| 791 |  | 
| 792 |     void imull_i32r(RegisterID src, int32_t value, RegisterID dst) | 
| 793 |     { | 
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
| 796 |     } | 
| 797 |  | 
| 798 |     void idivl_r(RegisterID dst) | 
| 799 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
| 801 |     } | 
| 802 |  | 
| 803 |     // Comparisons: | 
| 804 |  | 
| 805 |     void cmpl_rr(RegisterID src, RegisterID dst) | 
| 806 |     { | 
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
| 808 |     } | 
| 809 |  | 
| 810 |     void cmpl_rm(RegisterID src, int offset, RegisterID base) | 
| 811 |     { | 
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
| 813 |     } | 
| 814 |  | 
| 815 |     void cmpl_mr(int offset, RegisterID base, RegisterID src) | 
| 816 |     { | 
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
| 818 |     } | 
| 819 |  | 
| 820 |     void cmpl_ir(int imm, RegisterID dst) | 
| 821 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
| 827 |             m_formatter.immediate32(imm); | 
| 828 |         } | 
| 829 |     } | 
| 830 |  | 
| 831 |     void cmpl_ir_force32(int imm, RegisterID dst) | 
| 832 |     { | 
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
| 834 |         m_formatter.immediate32(imm); | 
| 835 |     } | 
| 836 |      | 
| 837 |     void cmpl_im(int imm, int offset, RegisterID base) | 
| 838 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
| 844 |             m_formatter.immediate32(imm); | 
| 845 |         } | 
| 846 |     } | 
| 847 |      | 
| 848 |     void cmpb_im(int imm, int offset, RegisterID base) | 
| 849 |     { | 
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
| 851 |         m_formatter.immediate8(imm); | 
| 852 |     } | 
| 853 |      | 
| 854 |     void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 855 |     { | 
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
| 857 |         m_formatter.immediate8(imm); | 
| 858 |     } | 
| 859 |      | 
| 860 | #if CPU(X86) | 
| 861 |     void cmpb_im(int imm, const void* addr) | 
| 862 |     { | 
| 863 |         m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr); | 
| 864 |         m_formatter.immediate8(imm); | 
| 865 |     } | 
| 866 | #endif | 
| 867 |  | 
| 868 |     void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 869 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
| 875 |             m_formatter.immediate32(imm); | 
| 876 |         } | 
| 877 |     } | 
| 878 |  | 
| 879 |     void cmpl_im_force32(int imm, int offset, RegisterID base) | 
| 880 |     { | 
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
| 882 |         m_formatter.immediate32(imm); | 
| 883 |     } | 
| 884 |  | 
| 885 | #if CPU(X86_64) | 
| 886 |     void cmpq_rr(RegisterID src, RegisterID dst) | 
| 887 |     { | 
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
| 889 |     } | 
| 890 |  | 
| 891 |     void cmpq_rm(RegisterID src, int offset, RegisterID base) | 
| 892 |     { | 
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
| 894 |     } | 
| 895 |  | 
| 896 |     void cmpq_mr(int offset, RegisterID base, RegisterID src) | 
| 897 |     { | 
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
| 899 |     } | 
| 900 |  | 
| 901 |     void cmpq_ir(int imm, RegisterID dst) | 
| 902 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
| 908 |             m_formatter.immediate32(imm); | 
| 909 |         } | 
| 910 |     } | 
| 911 |  | 
| 912 |     void cmpq_im(int imm, int offset, RegisterID base) | 
| 913 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
| 919 |             m_formatter.immediate32(imm); | 
| 920 |         } | 
| 921 |     } | 
| 922 |  | 
| 923 |     void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 924 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
| 930 |             m_formatter.immediate32(imm); | 
| 931 |         } | 
| 932 |     } | 
| 933 | #else | 
| 934 |     void cmpl_rm(RegisterID reg, const void* addr) | 
| 935 |     { | 
| 936 |         m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr); | 
| 937 |     } | 
| 938 |  | 
| 939 |     void cmpl_im(int imm, const void* addr) | 
| 940 |     { | 
| 941 |         if (CAN_SIGN_EXTEND_8_32(imm)) { | 
| 942 |             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr); | 
| 943 |             m_formatter.immediate8(imm); | 
| 944 |         } else { | 
| 945 |             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr); | 
| 946 |             m_formatter.immediate32(imm); | 
| 947 |         } | 
| 948 |     } | 
| 949 | #endif | 
| 950 |  | 
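    // The 16-bit forms below (cmpw/testw/movw) are the same Ev encodings preceded by the 0x66
    // operand-size prefix.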
| 951 |     void cmpw_ir(int imm, RegisterID dst) | 
| 952 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
| 960 |             m_formatter.immediate16(imm); | 
| 961 |         } | 
| 962 |     } | 
| 963 |  | 
| 964 |     void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) | 
| 965 |     { | 
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
| 968 |     } | 
| 969 |  | 
| 970 |     void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 971 |     { | 
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
| 979 |             m_formatter.immediate16(imm); | 
| 980 |         } | 
| 981 |     } | 
| 982 |  | 
| 983 |     void testl_rr(RegisterID src, RegisterID dst) | 
| 984 |     { | 
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
| 986 |     } | 
| 987 |      | 
| 988 |     void testl_i32r(int imm, RegisterID dst) | 
| 989 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
| 991 |         m_formatter.immediate32(imm); | 
| 992 |     } | 
| 993 |  | 
| 994 |     void testl_i32m(int imm, int offset, RegisterID base) | 
| 995 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
| 997 |         m_formatter.immediate32(imm); | 
| 998 |     } | 
| 999 |  | 
| 1000 |     void testb_rr(RegisterID src, RegisterID dst) | 
| 1001 |     { | 
        m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
| 1003 |     } | 
| 1004 |  | 
| 1005 |     void testb_im(int imm, int offset, RegisterID base) | 
| 1006 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
| 1008 |         m_formatter.immediate8(imm); | 
| 1009 |     } | 
| 1010 |      | 
| 1011 |     void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 1012 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
| 1014 |         m_formatter.immediate8(imm); | 
| 1015 |     } | 
| 1016 |  | 
| 1017 | #if CPU(X86) | 
| 1018 |     void testb_im(int imm, const void* addr) | 
| 1019 |     { | 
| 1020 |         m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr); | 
| 1021 |         m_formatter.immediate8(imm); | 
| 1022 |     } | 
| 1023 | #endif | 
| 1024 |  | 
| 1025 |     void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 1026 |     { | 
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
| 1028 |         m_formatter.immediate32(imm); | 
| 1029 |     } | 
| 1030 |  | 
| 1031 | #if CPU(X86_64) | 
| 1032 |     void testq_rr(RegisterID src, RegisterID dst) | 
| 1033 |     { | 
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
| 1035 |     } | 
| 1036 |  | 
| 1037 |     void testq_rm(RegisterID src, int offset, RegisterID base) | 
| 1038 |     { | 
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
| 1040 |     } | 
| 1041 |  | 
| 1042 |     void testq_i32r(int imm, RegisterID dst) | 
| 1043 |     { | 
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
| 1045 |         m_formatter.immediate32(imm); | 
| 1046 |     } | 
| 1047 |  | 
| 1048 |     void testq_i32m(int imm, int offset, RegisterID base) | 
| 1049 |     { | 
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
| 1051 |         m_formatter.immediate32(imm); | 
| 1052 |     } | 
| 1053 |  | 
| 1054 |     void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 1055 |     { | 
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
| 1057 |         m_formatter.immediate32(imm); | 
| 1058 |     } | 
| 1059 | #endif  | 
| 1060 |  | 
| 1061 |     void testw_rr(RegisterID src, RegisterID dst) | 
| 1062 |     { | 
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
| 1065 |     } | 
| 1066 |      | 
| 1067 |     void testb_i8r(int imm, RegisterID dst) | 
| 1068 |     { | 
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
| 1070 |         m_formatter.immediate8(imm); | 
| 1071 |     } | 
| 1072 |  | 
| 1073 |     void setCC_r(Condition cond, RegisterID dst) | 
| 1074 |     { | 
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
| 1076 |     } | 
| 1077 |  | 
| 1078 |     void sete_r(RegisterID dst) | 
| 1079 |     { | 
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
| 1081 |     } | 
| 1082 |  | 
| 1083 |     void setz_r(RegisterID dst) | 
| 1084 |     { | 
| 1085 |         sete_r(dst); | 
| 1086 |     } | 
| 1087 |  | 
| 1088 |     void setne_r(RegisterID dst) | 
| 1089 |     { | 
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
| 1091 |     } | 
| 1092 |  | 
| 1093 |     void setnz_r(RegisterID dst) | 
| 1094 |     { | 
| 1095 |         setne_r(dst); | 
| 1096 |     } | 
| 1097 |  | 
| 1098 |     // Various move ops: | 
| 1099 |  | 
| 1100 |     void cdq() | 
| 1101 |     { | 
        m_formatter.oneByteOp(OP_CDQ);
| 1103 |     } | 
| 1104 |  | 
| 1105 |     void fstpl(int offset, RegisterID base) | 
| 1106 |     { | 
        m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
| 1108 |     } | 
| 1109 |  | 
| 1110 |     void xchgl_rr(RegisterID src, RegisterID dst) | 
| 1111 |     { | 
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
| 1113 |     } | 
| 1114 |  | 
| 1115 | #if CPU(X86_64) | 
| 1116 |     void xchgq_rr(RegisterID src, RegisterID dst) | 
| 1117 |     { | 
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
| 1119 |     } | 
| 1120 | #endif | 
| 1121 |  | 
| 1122 |     void movl_rr(RegisterID src, RegisterID dst) | 
| 1123 |     { | 
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
| 1125 |     } | 
| 1126 |      | 
| 1127 |     void movl_rm(RegisterID src, int offset, RegisterID base) | 
| 1128 |     { | 
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
| 1130 |     } | 
| 1131 |  | 
| 1132 |     void movl_rm_disp32(RegisterID src, int offset, RegisterID base) | 
| 1133 |     { | 
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
| 1135 |     } | 
| 1136 |  | 
| 1137 |     void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) | 
| 1138 |     { | 
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
| 1140 |     } | 
| 1141 |      | 
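    // movl_mEAX / movl_EAXm use the 0xA1 / 0xA3 moffs forms, which take a full-width absolute
    // address immediate (64-bit on x86-64) instead of a ModRM operand.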
| 1142 |     void movl_mEAX(const void* addr) | 
| 1143 |     { | 
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
| 1147 | #else | 
| 1148 |         m_formatter.immediate32(reinterpret_cast<int>(addr)); | 
| 1149 | #endif | 
| 1150 |     } | 
| 1151 |  | 
| 1152 |     void movl_mr(int offset, RegisterID base, RegisterID dst) | 
| 1153 |     { | 
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
| 1155 |     } | 
| 1156 |  | 
| 1157 |     void movl_mr_disp32(int offset, RegisterID base, RegisterID dst) | 
| 1158 |     { | 
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
| 1160 |     } | 
| 1161 |      | 
| 1162 |     void movl_mr_disp8(int offset, RegisterID base, RegisterID dst) | 
| 1163 |     { | 
        m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
| 1165 |     } | 
| 1166 |  | 
| 1167 |     void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1168 |     { | 
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
| 1170 |     } | 
| 1171 |  | 
| 1172 |     void movl_i32r(int imm, RegisterID dst) | 
| 1173 |     { | 
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
| 1175 |         m_formatter.immediate32(imm); | 
| 1176 |     } | 
| 1177 |  | 
| 1178 |     void movl_i32m(int imm, int offset, RegisterID base) | 
| 1179 |     { | 
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
| 1181 |         m_formatter.immediate32(imm); | 
| 1182 |     } | 
| 1183 |      | 
| 1184 |     void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 1185 |     { | 
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
| 1187 |         m_formatter.immediate32(imm); | 
| 1188 |     } | 
| 1189 |  | 
| 1190 | #if !CPU(X86_64) | 
| 1191 |     void movb_i8m(int imm, const void* addr) | 
| 1192 |     { | 
| 1193 |         ASSERT(-128 <= imm && imm < 128); | 
| 1194 |         m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr); | 
| 1195 |         m_formatter.immediate8(imm); | 
| 1196 |     } | 
| 1197 | #endif | 
| 1198 |  | 
| 1199 |     void movb_i8m(int imm, int offset, RegisterID base) | 
| 1200 |     { | 
| 1201 |         ASSERT(-128 <= imm && imm < 128); | 
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
| 1203 |         m_formatter.immediate8(imm); | 
| 1204 |     } | 
| 1205 |  | 
| 1206 |     void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale) | 
| 1207 |     { | 
| 1208 |         ASSERT(-128 <= imm && imm < 128); | 
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
| 1210 |         m_formatter.immediate8(imm); | 
| 1211 |     } | 
| 1212 |      | 
| 1213 |     void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) | 
| 1214 |     { | 
        m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
| 1216 |     } | 
| 1217 |      | 
| 1218 |     void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) | 
| 1219 |     { | 
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
| 1222 |     } | 
| 1223 |  | 
| 1224 |     void movl_EAXm(const void* addr) | 
| 1225 |     { | 
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
| 1229 | #else | 
| 1230 |         m_formatter.immediate32(reinterpret_cast<int>(addr)); | 
| 1231 | #endif | 
| 1232 |     } | 
| 1233 |  | 
| 1234 | #if CPU(X86_64) | 
| 1235 |     void movq_rr(RegisterID src, RegisterID dst) | 
| 1236 |     { | 
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
| 1238 |     } | 
| 1239 |  | 
| 1240 |     void movq_rm(RegisterID src, int offset, RegisterID base) | 
| 1241 |     { | 
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
| 1243 |     } | 
| 1244 |  | 
| 1245 |     void movq_rm_disp32(RegisterID src, int offset, RegisterID base) | 
| 1246 |     { | 
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
| 1248 |     } | 
| 1249 |  | 
| 1250 |     void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) | 
| 1251 |     { | 
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
| 1253 |     } | 
| 1254 |  | 
| 1255 |     void movq_mEAX(const void* addr) | 
| 1256 |     { | 
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
| 1259 |     } | 
| 1260 |  | 
| 1261 |     void movq_EAXm(const void* addr) | 
| 1262 |     { | 
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
| 1265 |     } | 
| 1266 |  | 
| 1267 |     void movq_mr(int offset, RegisterID base, RegisterID dst) | 
| 1268 |     { | 
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
| 1270 |     } | 
| 1271 |  | 
| 1272 |     void movq_mr_disp32(int offset, RegisterID base, RegisterID dst) | 
| 1273 |     { | 
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
| 1275 |     } | 
| 1276 |  | 
| 1277 |     void movq_mr_disp8(int offset, RegisterID base, RegisterID dst) | 
| 1278 |     { | 
        m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
| 1280 |     } | 
| 1281 |  | 
| 1282 |     void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1283 |     { | 
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
| 1285 |     } | 
| 1286 |  | 
| 1287 |     void movq_i32m(int imm, int offset, RegisterID base) | 
| 1288 |     { | 
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
| 1290 |         m_formatter.immediate32(imm); | 
| 1291 |     } | 
| 1292 |  | 
| 1293 |     void movq_i64r(int64_t imm, RegisterID dst) | 
| 1294 |     { | 
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
| 1296 |         m_formatter.immediate64(imm); | 
| 1297 |     } | 
| 1298 |      | 
| 1299 |     void movsxd_rr(RegisterID src, RegisterID dst) | 
| 1300 |     { | 
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
| 1302 |     } | 
| 1303 |      | 
| 1304 |      | 
| 1305 | #else | 
| 1306 |     void movl_rm(RegisterID src, const void* addr) | 
| 1307 |     { | 
| 1308 |         if (src == X86Registers::eax) | 
| 1309 |             movl_EAXm(addr); | 
| 1310 |         else  | 
| 1311 |             m_formatter.oneByteOp(OP_MOV_EvGv, src, addr); | 
| 1312 |     } | 
| 1313 |      | 
| 1314 |     void movl_mr(const void* addr, RegisterID dst) | 
| 1315 |     { | 
| 1316 |         if (dst == X86Registers::eax) | 
| 1317 |             movl_mEAX(addr); | 
| 1318 |         else | 
| 1319 |             m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr); | 
| 1320 |     } | 
| 1321 |  | 
| 1322 |     void movl_i32m(int imm, const void* addr) | 
| 1323 |     { | 
| 1324 |         m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr); | 
| 1325 |         m_formatter.immediate32(imm); | 
| 1326 |     } | 
| 1327 | #endif | 
| 1328 |  | 
| 1329 |     void movzwl_mr(int offset, RegisterID base, RegisterID dst) | 
| 1330 |     { | 
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
| 1332 |     } | 
| 1333 |  | 
| 1334 |     void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1335 |     { | 
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
| 1337 |     } | 
| 1338 |  | 
| 1339 |     void movswl_mr(int offset, RegisterID base, RegisterID dst) | 
| 1340 |     { | 
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
| 1342 |     } | 
| 1343 |  | 
| 1344 |     void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1345 |     { | 
| 1346 |         m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEw, reg: dst, base, index, scale, offset); | 
| 1347 |     } | 
| 1348 |  | 
| 1349 |     void movzbl_mr(int offset, RegisterID base, RegisterID dst) | 
| 1350 |     { | 
| 1351 |         m_formatter.twoByteOp(opcode: OP2_MOVZX_GvEb, reg: dst, base, offset); | 
| 1352 |     } | 
| 1353 |      | 
| 1354 |     void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1355 |     { | 
| 1356 |         m_formatter.twoByteOp(opcode: OP2_MOVZX_GvEb, reg: dst, base, index, scale, offset); | 
| 1357 |     } | 
| 1358 |  | 
| 1359 |     void movsbl_mr(int offset, RegisterID base, RegisterID dst) | 
| 1360 |     { | 
| 1361 |         m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEb, reg: dst, base, offset); | 
| 1362 |     } | 
| 1363 |      | 
| 1364 |     void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1365 |     { | 
| 1366 |         m_formatter.twoByteOp(opcode: OP2_MOVSX_GvEb, reg: dst, base, index, scale, offset); | 
| 1367 |     } | 
| 1368 |  | 
| 1369 |     void movzbl_rr(RegisterID src, RegisterID dst) | 
| 1370 |     { | 
| 1371 |         // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register | 
| 1372 |         // is in the range ESP-EDI, and the src would not have required a REX).  Unneeded | 
| 1373 |         // REX prefixes are defined to be silently ignored by the processor. | 
| 1374 |         m_formatter.twoByteOp8(opcode: OP2_MOVZX_GvEb, reg: dst, rm: src); | 
| 1375 |     } | 
| 1376 |  | 
| 1377 |     void leal_mr(int offset, RegisterID base, RegisterID dst) | 
| 1378 |     { | 
| 1379 |         m_formatter.oneByteOp(opcode: OP_LEA, reg: dst, base, offset); | 
| 1380 |     } | 
| 1381 |  | 
| 1382 |     void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) | 
| 1383 |     { | 
| 1384 |         m_formatter.oneByteOp(opcode: OP_LEA, reg: dst, base, index, scale, offset); | 
| 1385 |     } | 
| 1386 |  | 
| 1387 | #if CPU(X86_64) | 
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }

    void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, index, scale, offset);
    }
| 1397 | #endif | 
| 1398 |  | 
| 1399 |     // Flow control: | 
| 1400 |  | 
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return m_formatter.label();
    }

    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return an AssemblerLabel so we have a label to the jump, so that we can use it
    // to make a tail recursive call on x86-64.  The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return m_formatter.label();
    }

    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
| 1437 |      | 
| 1438 | #if !CPU(X86_64) | 
| 1439 |     void jmp_m(const void* address) | 
| 1440 |     { | 
| 1441 |         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address); | 
| 1442 |     } | 
| 1443 | #endif | 
| 1444 |  | 
    AssemblerLabel jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jnz()
    {
        return jne();
    }

    AssemblerLabel je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jz()
    {
        return je();
    }

    AssemblerLabel jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jnp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNP));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
| 1544 |  | 
| 1545 |     // SSE operations: | 
| 1546 |  | 
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }
| 1558 |  | 
| 1559 | #if !CPU(X86_64) | 
| 1560 |     void addsd_mr(const void* address, XMMRegisterID dst) | 
| 1561 |     { | 
| 1562 |         m_formatter.prefix(PRE_SSE_F2); | 
| 1563 |         m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address); | 
| 1564 |     } | 
| 1565 | #endif | 
| 1566 |  | 
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }
| 1578 |  | 
| 1579 | #if !CPU(X86_64) | 
| 1580 |     void cvtsi2sd_mr(const void* address, XMMRegisterID dst) | 
| 1581 |     { | 
| 1582 |         m_formatter.prefix(PRE_SSE_F2); | 
| 1583 |         m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address); | 
| 1584 |     } | 
| 1585 | #endif | 
| 1586 |  | 
| 1587 | #if CPU(X86_64) | 
    void cvtsiq2sd_rr(RegisterID src, FPRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }
| 1593 |  | 
| 1594 | #endif | 
| 1595 |  | 
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
    }

    void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
    }

#if CPU(X86_64)
    void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }
| 1620 | #endif | 
| 1621 |  | 
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

#if CPU(X86_64)
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
| 1646 | #endif | 
| 1647 |  | 
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
    }

    void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
    }
| 1689 |  | 
| 1690 | #if !CPU(X86_64) | 
| 1691 |     void movsd_mr(const void* address, XMMRegisterID dst) | 
| 1692 |     { | 
| 1693 |         m_formatter.prefix(PRE_SSE_F2); | 
| 1694 |         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address); | 
| 1695 |     } | 
| 1696 |     void movsd_rm(XMMRegisterID src, const void* address) | 
| 1697 |     { | 
| 1698 |         m_formatter.prefix(PRE_SSE_F2); | 
| 1699 |         m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address); | 
| 1700 |     } | 
| 1701 | #endif | 
| 1702 |  | 
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    void psllq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    void psrlq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    void por_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
| 1795 |  | 
| 1796 |     // Misc instructions: | 
| 1797 |  | 
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
| 1812 |  | 
| 1813 |     // Assembler admin methods: | 
| 1814 |  | 
| 1815 |     size_t codeSize() const | 
| 1816 |     { | 
| 1817 |         return m_formatter.codeSize(); | 
| 1818 |     } | 
| 1819 |      | 
| 1820 |     AssemblerLabel labelForWatchpoint() | 
| 1821 |     { | 
| 1822 |         AssemblerLabel result = m_formatter.label(); | 
| 1823 |         if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint) | 
| 1824 |             result = label(); | 
| 1825 |         m_indexOfLastWatchpoint = result.m_offset; | 
| 1826 |         m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); | 
| 1827 |         return result; | 
| 1828 |     } | 
| 1829 |      | 
| 1830 |     AssemblerLabel labelIgnoringWatchpoints() | 
| 1831 |     { | 
| 1832 |         return m_formatter.label(); | 
| 1833 |     } | 
| 1834 |  | 
| 1835 |     AssemblerLabel label() | 
| 1836 |     { | 
| 1837 |         AssemblerLabel result = m_formatter.label(); | 
| 1838 |         while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) { | 
| 1839 |             nop(); | 
| 1840 |             result = m_formatter.label(); | 
| 1841 |         } | 
| 1842 |         return result; | 
| 1843 |     } | 
| 1844 |  | 
| 1845 |     AssemblerLabel align(int alignment) | 
| 1846 |     { | 
| 1847 |         while (!m_formatter.isAligned(alignment)) | 
            m_formatter.oneByteOp(OP_HLT);
| 1849 |  | 
| 1850 |         return label(); | 
| 1851 |     } | 
| 1852 |  | 
| 1853 |     // Linking & patching: | 
| 1854 |     // | 
| 1855 |     // 'link' and 'patch' methods are for use on unprotected code - such as the code | 
| 1856 |     // within the AssemblerBuffer, and code being patched by the patch buffer.  Once | 
| 1857 |     // code has been finalized it is (platform support permitting) within a non- | 
    // writable region of memory; to modify the code in an execute-only executable
| 1859 |     // pool the 'repatch' and 'relink' methods should be used. | 
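    //
    // A typical flow (illustrative): record the AssemblerLabel returned while emitting, e.g.
    // AssemblerLabel j = jmp(); then call linkJump(j, target) while the buffer is still
    // writable, and use relinkJump() / relinkCall() on the finalized copy once it has been
    // made executable.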
| 1860 |  | 
| 1861 |     void linkJump(AssemblerLabel from, AssemblerLabel to) | 
| 1862 |     { | 
| 1863 |         ASSERT(from.isSet()); | 
| 1864 |         ASSERT(to.isSet()); | 
| 1865 |  | 
| 1866 |         char* code = reinterpret_cast<char*>(m_formatter.data()); | 
| 1867 |         ASSERT(!loadPossiblyUnaligned<int32_t>(code, from.m_offset, -1)); | 
        setRel32(code + from.m_offset, code + to.m_offset);
| 1869 |     } | 
| 1870 |  | 
| 1871 |     template<typename T> | 
| 1872 |     T loadPossiblyUnaligned(char *ptr, size_t offset, int idx) | 
| 1873 |     { | 
| 1874 |         T *t_ptr = &reinterpret_cast<T*>(ptr + offset)[idx]; | 
| 1875 |         T val; | 
| 1876 |         memcpy(&val, t_ptr, sizeof(T)); | 
| 1877 |         return val; | 
| 1878 |     } | 
| 1879 |      | 
| 1880 |     static void linkJump(void* code, AssemblerLabel from, void* to) | 
| 1881 |     { | 
| 1882 |         ASSERT(from.isSet()); | 
| 1883 |  | 
        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
| 1885 |     } | 
| 1886 |  | 
| 1887 |     static void linkCall(void* code, AssemblerLabel from, void* to) | 
| 1888 |     { | 
| 1889 |         ASSERT(from.isSet()); | 
| 1890 |  | 
        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
| 1892 |     } | 
| 1893 |  | 
| 1894 |     static void linkPointer(void* code, AssemblerLabel where, void* value) | 
| 1895 |     { | 
| 1896 |         ASSERT(where.isSet()); | 
| 1897 |  | 
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
| 1899 |     } | 
| 1900 |  | 
| 1901 |     static void relinkJump(void* from, void* to) | 
| 1902 |     { | 
| 1903 |         setRel32(from, to); | 
| 1904 |     } | 
| 1905 |      | 
| 1906 |     static void relinkCall(void* from, void* to) | 
| 1907 |     { | 
| 1908 |         setRel32(from, to); | 
| 1909 |     } | 
| 1910 |      | 
| 1911 |     static void repatchCompact(void* where, int32_t value) | 
| 1912 |     { | 
| 1913 |         ASSERT(value >= std::numeric_limits<int8_t>::min()); | 
| 1914 |         ASSERT(value <= std::numeric_limits<int8_t>::max()); | 
| 1915 |         setInt8(where, value); | 
| 1916 |     } | 
| 1917 |  | 
| 1918 |     static void repatchInt32(void* where, int32_t value) | 
| 1919 |     { | 
| 1920 |         setInt32(where, value); | 
| 1921 |     } | 
| 1922 |  | 
| 1923 |     static void repatchPointer(void* where, void* value) | 
| 1924 |     { | 
| 1925 |         setPointer(where, value); | 
| 1926 |     } | 
| 1927 |      | 
| 1928 |     static void* readPointer(void* where) | 
| 1929 |     { | 
| 1930 |         return reinterpret_cast<void**>(where)[-1]; | 
| 1931 |     } | 
| 1932 |  | 
| 1933 |     static void replaceWithJump(void* instructionStart, void* to) | 
| 1934 |     { | 
| 1935 |         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); | 
| 1936 |         uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to); | 
| 1937 |         intptr_t distance = (intptr_t)(dstPtr - (ptr + 5)); | 
| 1938 |         ptr[0] = static_cast<uint8_t>(OP_JMP_rel32); | 
| 1939 |         *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance); | 
| 1940 |     } | 
| 1941 |      | 
| 1942 |     static ptrdiff_t maxJumpReplacementSize() | 
| 1943 |     { | 
| 1944 |         return 5; | 
| 1945 |     } | 
| 1946 |      | 
| 1947 | #if CPU(X86_64) | 
| 1948 |     static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst) | 
| 1949 |     { | 
| 1950 |         const int rexBytes = 1; | 
| 1951 |         const int opcodeBytes = 1; | 
| 1952 |         ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize()); | 
| 1953 |         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); | 
| 1954 |         ptr[0] = PRE_REX | (1 << 3) | (dst >> 3); | 
| 1955 |         ptr[1] = OP_MOV_EAXIv | (dst & 7); | 
| 1956 |          | 
| 1957 |         union { | 
| 1958 |             uint64_t asWord; | 
| 1959 |             uint8_t asBytes[8]; | 
| 1960 |         } u; | 
| 1961 |         u.asWord = imm; | 
| 1962 |         for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) | 
| 1963 |             ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; | 
| 1964 |     } | 
| 1965 | #endif | 
| 1966 |      | 
| 1967 |     static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst) | 
| 1968 |     { | 
| 1969 |         const int opcodeBytes = 1; | 
| 1970 |         const int modRMBytes = 1; | 
| 1971 |         ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize()); | 
| 1972 |         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); | 
| 1973 |         ptr[0] = OP_GROUP1_EvIz; | 
| 1974 |         ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst; | 
| 1975 |         union { | 
| 1976 |             uint32_t asWord; | 
| 1977 |             uint8_t asBytes[4]; | 
| 1978 |         } u; | 
| 1979 |         u.asWord = imm; | 
| 1980 |         for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) | 
| 1981 |             ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes]; | 
| 1982 |     } | 
| 1983 |      | 
| 1984 |     static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst) | 
| 1985 |     { | 
| 1986 |         ASSERT_UNUSED(offset, !offset); | 
| 1987 |         const int opcodeBytes = 1; | 
| 1988 |         const int modRMBytes = 1; | 
| 1989 |         ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize()); | 
| 1990 |         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); | 
| 1991 |         ptr[0] = OP_GROUP1_EvIz; | 
| 1992 |         ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst; | 
| 1993 |         union { | 
| 1994 |             uint32_t asWord; | 
| 1995 |             uint8_t asBytes[4]; | 
| 1996 |         } u; | 
| 1997 |         u.asWord = imm; | 
| 1998 |         for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) | 
| 1999 |             ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes]; | 
| 2000 |     } | 
| 2001 |      | 
| 2002 |     static void replaceWithLoad(void* instructionStart) | 
| 2003 |     { | 
| 2004 |         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); | 
| 2005 | #if CPU(X86_64) | 
| 2006 |         if ((*ptr & ~15) == PRE_REX) | 
| 2007 |             ptr++; | 
| 2008 | #endif | 
| 2009 |         switch (*ptr) { | 
| 2010 |         case OP_MOV_GvEv: | 
| 2011 |             break; | 
| 2012 |         case OP_LEA: | 
| 2013 |             *ptr = OP_MOV_GvEv; | 
| 2014 |             break; | 
| 2015 |         default: | 
| 2016 |             RELEASE_ASSERT_NOT_REACHED(); | 
| 2017 |         } | 
| 2018 |     } | 
| 2019 |      | 
| 2020 |     static void replaceWithAddressComputation(void* instructionStart) | 
| 2021 |     { | 
| 2022 |         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); | 
| 2023 | #if CPU(X86_64) | 
| 2024 |         if ((*ptr & ~15) == PRE_REX) | 
| 2025 |             ptr++; | 
| 2026 | #endif | 
| 2027 |         switch (*ptr) { | 
| 2028 |         case OP_MOV_GvEv: | 
| 2029 |             *ptr = OP_LEA; | 
| 2030 |             break; | 
| 2031 |         case OP_LEA: | 
| 2032 |             break; | 
| 2033 |         default: | 
| 2034 |             RELEASE_ASSERT_NOT_REACHED(); | 
| 2035 |         } | 
| 2036 |     } | 
| 2037 |      | 
| 2038 |     static unsigned getCallReturnOffset(AssemblerLabel call) | 
| 2039 |     { | 
| 2040 |         ASSERT(call.isSet()); | 
| 2041 |         return call.m_offset; | 
| 2042 |     } | 
| 2043 |  | 
| 2044 |     static void* getRelocatedAddress(void* code, AssemblerLabel label) | 
| 2045 |     { | 
| 2046 |         ASSERT(label.isSet()); | 
| 2047 |         return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset); | 
| 2048 |     } | 
| 2049 |      | 
| 2050 |     static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) | 
| 2051 |     { | 
| 2052 |         return b.m_offset - a.m_offset; | 
| 2053 |     } | 
| 2054 |      | 
| 2055 |     PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort) | 
| 2056 |     { | 
| 2057 |         return m_formatter.executableCopy(globalData, ownerUID, effort); | 
| 2058 |     } | 
| 2059 |  | 
| 2060 |     unsigned debugOffset() { return m_formatter.debugOffset(); } | 
| 2061 |  | 
| 2062 |     void nop() | 
| 2063 |     { | 
        m_formatter.oneByteOp(OP_NOP);
| 2065 |     } | 
| 2066 |  | 
| 2067 |     // This is a no-op on x86 | 
| 2068 |     ALWAYS_INLINE static void cacheFlush(void*, size_t) { } | 
| 2069 |  | 
| 2070 | private: | 
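    // Note: these setters receive a pointer to the location just past the field being
    // written (which is what the recorded AssemblerLabel offsets resolve to), hence the
    // [-1] indexing below.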
| 2071 |  | 
| 2072 |     static void setPointer(void* where, void* value) | 
| 2073 |     { | 
| 2074 |         reinterpret_cast<void**>(where)[-1] = value; | 
| 2075 |     } | 
| 2076 |  | 
| 2077 |     static void setInt32(void* where, int32_t value) | 
| 2078 |     { | 
        storePossiblyUnaligned<int32_t>(where, -1, value);
| 2080 |     } | 
| 2081 |  | 
| 2082 |     template <typename T> | 
| 2083 |     static void storePossiblyUnaligned(void *where, int idx, T value) | 
| 2084 |     { | 
| 2085 |         T *ptr = &reinterpret_cast<T*>(where)[idx]; | 
| 2086 |         memcpy(ptr, &value, sizeof(T)); | 
| 2087 |     } | 
| 2088 |      | 
| 2089 |     static void setInt8(void* where, int8_t value) | 
| 2090 |     { | 
| 2091 |         reinterpret_cast<int8_t*>(where)[-1] = value; | 
| 2092 |     } | 
| 2093 |  | 
| 2094 |     static void setRel32(void* from, void* to) | 
| 2095 |     { | 
| 2096 |         intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from); | 
| 2097 |         ASSERT(offset == static_cast<int32_t>(offset)); | 
| 2098 |  | 
        setInt32(from, offset);
| 2100 |     } | 
| 2101 |  | 
| 2102 |     class X86InstructionFormatter { | 
| 2103 |  | 
| 2104 |         static const int maxInstructionSize = 16; | 
| 2105 |  | 
| 2106 |     public: | 
| 2107 |  | 
| 2108 |         enum ModRmMode { | 
| 2109 |             ModRmMemoryNoDisp, | 
| 2110 |             ModRmMemoryDisp8, | 
| 2111 |             ModRmMemoryDisp32, | 
| 2112 |             ModRmRegister, | 
| 2113 |         }; | 
| 2114 |  | 
| 2115 |         // Legacy prefix bytes: | 
| 2116 |         // | 
        // These are emitted prior to the instruction.
| 2118 |  | 
| 2119 |         void prefix(OneByteOpcodeID pre) | 
| 2120 |         { | 
            m_buffer.putByte(pre);
| 2122 |         } | 
| 2123 |  | 
| 2124 |         // Word-sized operands / no operand instruction formatters. | 
| 2125 |         // | 
| 2126 |         // In addition to the opcode, the following operand permutations are supported: | 
| 2127 |         //   * None - instruction takes no operands. | 
| 2128 |         //   * One register - the low three bits of the RegisterID are added into the opcode. | 
| 2129 |         //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place). | 
| 2130 |         //   * Three argument ModRM - a register, and a register and an offset describing a memory operand. | 
| 2131 |         //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand. | 
| 2132 |         // | 
| 2133 |         // For 32-bit x86 targets, the address operand may also be provided as a void*. | 
| 2134 |         // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used. | 
| 2135 |         // | 
        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
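        //
        // For example (illustrative): oneByteOp(OP_MOV_GvEv, X86Registers::eax, X86Registers::ebp, 8)
        // emits 8B 45 08 on 32-bit x86 - the opcode byte, a ModRM byte (mod=01, reg=eax, rm=ebp),
        // and an 8-bit displacement of 8; on x86-64 a REX prefix would additionally be planted only
        // if a high-numbered register were involved.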
| 2137 |  | 
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
| 2190 |  | 
| 2191 | #if !CPU(X86_64) | 
| 2192 |         void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address) | 
| 2193 |         { | 
| 2194 |             m_buffer.ensureSpace(maxInstructionSize); | 
| 2195 |             m_buffer.putByteUnchecked(opcode); | 
| 2196 |             memoryModRM(reg, address); | 
| 2197 |         } | 
| 2198 | #endif | 
| 2199 |  | 
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
| 2233 |  | 
| 2234 | #if !CPU(X86_64) | 
| 2235 |         void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address) | 
| 2236 |         { | 
| 2237 |             m_buffer.ensureSpace(maxInstructionSize); | 
| 2238 |             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); | 
| 2239 |             m_buffer.putByteUnchecked(opcode); | 
| 2240 |             memoryModRM(reg, address); | 
| 2241 |         } | 
| 2242 | #endif | 
| 2243 |  | 
| 2244 | #if CPU(X86_64) | 
| 2245 |         // Quad-word-sized operands: | 
| 2246 |         // | 
        // Used to format 64-bit operations, planting a REX.w prefix.
| 2248 |         // When planting d64 or f64 instructions, not requiring a REX.w prefix, | 
| 2249 |         // the normal (non-'64'-postfixed) formatters should be used. | 
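        //
        // For example (illustrative): oneByteOp64(OP_LEA, X86Registers::eax, X86Registers::ebp, 16)
        // emits 48 8D 45 10 - a REX.W prefix, the LEA opcode, ModRM (mod=01, reg=rax, rm=rbp) and
        // an 8-bit displacement of 16, i.e. leaq 16(%rbp), %rax.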
| 2250 |  | 
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
| 2313 | #endif | 
| 2314 |  | 
| 2315 |         // Byte-operands: | 
| 2316 |         // | 
| 2317 |         // These methods format byte operations.  Byte operations differ from the normal | 
| 2318 |         // formatters in the circumstances under which they will decide to emit REX prefixes. | 
| 2319 |         // These should be used where any register operand signifies a byte register. | 
| 2320 |         // | 
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64.  These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically they cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated.  In order to ensure the correct registers spl..dil are selected, a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions that mix operand sizes, in which
        // case an unnecessary REX will be emitted, for example:
| 2332 |         //     movzbl %al, %edi | 
| 2333 |         // In this case a REX will be planted since edi is 7 (and were this a byte operand | 
| 2334 |         // a REX would be required to specify dil instead of bh).  Unneeded REX prefixes will | 
| 2335 |         // be silently ignored by the processor. | 
| 2336 |         // | 
| 2337 |         // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex() | 
| 2338 |         // is provided to check byte register operands. | 
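        //
        // For example (illustrative): on x86-64, twoByteOp8(OP2_MOVZX_GvEb, X86Registers::eax,
        // X86Registers::esp) emits 40 0F B6 C4 (movzbl %spl, %eax); without the 0x40 REX prefix
        // the same ModRM byte would select %ah rather than %spl.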
| 2339 |  | 
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
| 2381 |  | 
| 2382 |         // Immediates: | 
| 2383 |         // | 
        // An immediate should be appended where appropriate after an op has been emitted.
| 2385 |         // The writes are unchecked since the opcode formatters above will have ensured space. | 
| 2386 |  | 
        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        AssemblerLabel immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return label();
        }
| 2412 |  | 
| 2413 |         // Administrative methods: | 
| 2414 |  | 
| 2415 |         size_t codeSize() const { return m_buffer.codeSize(); } | 
| 2416 |         AssemblerLabel label() const { return m_buffer.label(); } | 
| 2417 |         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } | 
| 2418 |         void* data() const { return m_buffer.data(); } | 
| 2419 |  | 
| 2420 |         PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort) | 
| 2421 |         { | 
| 2422 |             return m_buffer.executableCopy(globalData, ownerUID, effort); | 
| 2423 |         } | 
| 2424 |  | 
| 2425 |         unsigned debugOffset() { return m_buffer.debugOffset(); } | 
| 2426 |  | 
| 2427 |     private: | 
| 2428 |  | 
| 2429 |         // Internals; ModRm and REX formatters. | 
| 2430 |  | 
| 2431 |         static const RegisterID noBase = X86Registers::ebp; | 
| 2432 |         static const RegisterID hasSib = X86Registers::esp; | 
| 2433 |         static const RegisterID noIndex = X86Registers::esp; | 
| 2434 | #if CPU(X86_64) | 
| 2435 |         static const RegisterID noBase2 = X86Registers::r13; | 
| 2436 |         static const RegisterID hasSib2 = X86Registers::r12; | 
| 2437 |  | 
        // Registers r8 & above require a REX prefix.
| 2439 |         inline bool regRequiresRex(int reg) | 
| 2440 |         { | 
| 2441 |             return (reg >= X86Registers::r8); | 
| 2442 |         } | 
| 2443 |  | 
        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
| 2445 |         inline bool byteRegRequiresRex(int reg) | 
| 2446 |         { | 
| 2447 |             return (reg >= X86Registers::esp); | 
| 2448 |         } | 
| 2449 |  | 
| 2450 |         // Format a REX prefix byte. | 
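        // The layout is 0100WRXB, i.e. 0x40 | (W << 3) | (R << 2) | (X << 1) | B, where R, X and B
        // extend the ModRM reg field, the SIB index, and the ModRM rm / SIB base respectively.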
| 2451 |         inline void emitRex(bool w, int r, int x, int b) | 
| 2452 |         { | 
| 2453 |             ASSERT(r >= 0); | 
| 2454 |             ASSERT(x >= 0); | 
| 2455 |             ASSERT(b >= 0); | 
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
| 2457 |         } | 
| 2458 |  | 
| 2459 |         // Used to plant a REX byte with REX.w set (for 64-bit operations). | 
| 2460 |         inline void emitRexW(int r, int x, int b) | 
| 2461 |         { | 
            emitRex(true, r, x, b);
| 2463 |         } | 
| 2464 |  | 
| 2465 |         // Used for operations with byte operands - use byteRegRequiresRex() to check register operands, | 
| 2466 |         // regRequiresRex() to check other registers (i.e. address base & index). | 
| 2467 |         inline void emitRexIf(bool condition, int r, int x, int b) | 
| 2468 |         { | 
            if (condition) emitRex(false, r, x, b);
| 2470 |         } | 
| 2471 |  | 
| 2472 |         // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above). | 
| 2473 |         inline void emitRexIfNeeded(int r, int x, int b) | 
| 2474 |         { | 
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
| 2476 |         } | 
| 2477 | #else | 
| 2478 |         // No REX prefix bytes on 32-bit x86. | 
| 2479 |         inline bool regRequiresRex(int) { return false; } | 
| 2480 |         inline bool byteRegRequiresRex(int) { return false; } | 
| 2481 |         inline void emitRexIf(bool, int, int, int) {} | 
| 2482 |         inline void emitRexIfNeeded(int, int, int) {} | 
| 2483 | #endif | 
| 2484 |  | 
| 2485 |         void putModRm(ModRmMode mode, int reg, RegisterID rm) | 
| 2486 |         { | 
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
| 2488 |         } | 
| 2489 |  | 
| 2490 |         void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale) | 
| 2491 |         { | 
| 2492 |             ASSERT(mode != ModRmRegister); | 
| 2493 |  | 
            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
| 2496 |         } | 
| 2497 |  | 
| 2498 |         void registerModRM(int reg, RegisterID rm) | 
| 2499 |         { | 
            putModRm(ModRmRegister, reg, rm);
| 2501 |         } | 
| 2502 |  | 
| 2503 |         void memoryModRM(int reg, RegisterID base, int offset) | 
| 2504 |         { | 
| 2505 |             // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. | 
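            // For example (illustrative): reg=eax, base=esp, offset=8 encodes as ModRM 0x44
            // (mod=01, rm=100 selecting a SIB), SIB 0x24 (no index, base=esp), then disp8 0x08.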
| 2506 | #if CPU(X86_64) | 
| 2507 |             if ((base == hasSib) || (base == hasSib2)) { | 
| 2508 | #else | 
| 2509 |             if (base == hasSib) { | 
| 2510 | #endif | 
| 2511 |                 if (!offset) // No need to check if the base is noBase, since we know it is hasSib! | 
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
| 2519 |                 } | 
| 2520 |             } else { | 
| 2521 | #if CPU(X86_64) | 
| 2522 |                 if (!offset && (base != noBase) && (base != noBase2)) | 
| 2523 | #else | 
| 2524 |                 if (!offset && (base != noBase)) | 
| 2525 | #endif | 
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
| 2533 |                 } | 
| 2534 |             } | 
| 2535 |         } | 
| 2536 |  | 
| 2537 |         void memoryModRM_disp8(int reg, RegisterID base, int offset) | 
| 2538 |         { | 
| 2539 |             // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. | 
| 2540 |             ASSERT(CAN_SIGN_EXTEND_8_32(offset)); | 
| 2541 | #if CPU(X86_64) | 
| 2542 |             if ((base == hasSib) || (base == hasSib2)) { | 
| 2543 | #else | 
| 2544 |             if (base == hasSib) { | 
| 2545 | #endif | 
                putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp8, reg, base);
                m_buffer.putByteUnchecked(offset);
| 2551 |             } | 
| 2552 |         } | 
| 2553 |  | 
| 2554 |         void memoryModRM_disp32(int reg, RegisterID base, int offset) | 
| 2555 |         { | 
| 2556 |             // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. | 
| 2557 | #if CPU(X86_64) | 
| 2558 |             if ((base == hasSib) || (base == hasSib2)) { | 
| 2559 | #else | 
| 2560 |             if (base == hasSib) { | 
| 2561 | #endif | 
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
| 2567 |             } | 
| 2568 |         } | 
| 2569 |      | 
| 2570 |         void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset) | 
| 2571 |         { | 
| 2572 |             ASSERT(index != noIndex); | 
| 2573 |  | 
| 2574 | #if CPU(X86_64) | 
| 2575 |             if (!offset && (base != noBase) && (base != noBase2)) | 
| 2576 | #else | 
| 2577 |             if (!offset && (base != noBase)) | 
| 2578 | #endif | 
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
| 2586 |             } | 
| 2587 |         } | 
| 2588 |  | 
| 2589 | #if !CPU(X86_64) | 
| 2590 |         void memoryModRM(int reg, const void* address) | 
| 2591 |         { | 
| 2592 |             // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32! | 
| 2593 |             putModRm(ModRmMemoryNoDisp, reg, noBase); | 
| 2594 |             m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address)); | 
| 2595 |         } | 
| 2596 | #endif | 
| 2597 |  | 
| 2598 |         AssemblerBuffer m_buffer; | 
| 2599 |     } m_formatter; | 
| 2600 |     int m_indexOfLastWatchpoint; | 
| 2601 |     int m_indexOfTailOfLastWatchpoint; | 
| 2602 | }; | 
| 2603 |  | 
| 2604 | } // namespace JSC | 
| 2605 |  | 
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
| 2607 |  | 
| 2608 | #endif // X86Assembler_h | 
| 2609 |  |