/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

#define REPATCH_OFFSET_CALL_R11 3
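// 3 is the byte length of the indirect `call`/`jmp` through r11 (REX.B prefix,
// FF opcode, ModRM), i.e. the distance from the end of the patchable movq
// immediate (the DataLabelPtr) to the end of the call or jump that follows it.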

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const Scale ScalePtr = TimesEight;
    static const int PointerSize = 8;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

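    // x86-64 addressing modes carry at most a 32-bit displacement, so the
    // AbsoluteAddress helpers below first materialize the 64-bit pointer in the
    // scratch register (r11 on this port) and then use a register-indirect form.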
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(Address(scratchRegister), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(reg, Address(scratchRegister));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

    void load16(ExtendedAddress address, RegisterID dest)
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        MacroAssemblerX86Common::load16(BaseIndex(scratchRegister, address.base, TimesTwo), dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        MacroAssemblerX86Common::load16(address, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        MacroAssemblerX86Common::load16(address, dest);
    }

    void load32(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }
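    // The accumulator is special-cased above because MOV has a moffs form that
    // takes a full 64-bit absolute address, but only with eAX/rAX as the operand;
    // any other destination must route the address through a register first.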

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        m_assembler.addsd_mr(0, scratchRegister, dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

    void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID /*scratch*/)
    {
        zeroExtend32ToPtr(src, src);
        m_assembler.cvtsiq2sd_rr(src, dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store32(imm, scratchRegister);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(imm, Address(scratchRegister));
    }

    Call call()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }
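    // The sequence emitted above is, assuming scratchRegister is r11:
    //   49 BB <imm64>    movabs $target, %r11   (the patchable DataLabelPtr)
    //   41 FF D3         call *%r11
    // linkCall()/repatchCall() rewrite the 64-bit immediate in place, which is
    // why the end of the call is pinned REPATCH_OFFSET_CALL_R11 bytes past the label.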

    void callToRetrieveIP()
    {
        m_assembler.call();
    }

    // The AbsoluteAddress is a memory location that itself contains the address to jump to.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        jump(Address(scratchRegister));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
        add32(src, Address(scratchRegister));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister);
        add64(Address(scratchRegister), dest);
    }

    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        add64(scratchRegister, dest);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }
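    // Note: unlike the other add64 variants, the three-operand form above lowers
    // to lea, which computes the sum without updating the CPU flags.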

    void add64(TrustedImm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add64(imm, Address(scratchRegister));
    }

    void x86Lea64(BaseIndex index, RegisterID dest)
    {
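        // With no scaling (TimesOne) and no displacement, the lea degenerates
        // into a plain add, which is shorter; otherwise fall through to a full lea.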
        if (!index.scale && !index.offset) {
            if (index.base == dest) {
                add64(index.index, dest);
                return;
            }
            if (index.index == dest) {
                add64(index.base, dest);
                return;
            }
        }
        m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest);
    }

    void getEffectiveAddress(BaseIndex address, RegisterID dest)
    {
        return x86Lea64(address, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        or64(scratchRegister, dest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
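        // x | x == x, so aliased sources reduce to a move; otherwise accumulate
        // into whichever operand already lives in dest to save a move.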
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.rorq_i8r(imm.m_value, srcDst);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        sub64(scratchRegister, dest);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void lshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.shlq_CLr(dest);
        else {
            ASSERT(src != dest);

            // Variable shifts can only take their count from ecx, so swap it in if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }
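    // The swap trick above (also used by rshift64 and urshift64 below) works
    // because xchg is symmetric: the first swap moves the count into ecx; if dest
    // was ecx the value to shift now lives in src, hence the select; the second
    // swap puts the result where it belongs and restores the count register.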

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void rshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.sarq_CLr(dest);
        else {
            ASSERT(src != dest);

            // Variable shifts can only take their count from ecx, so swap it in if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift64(imm, dest);
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrq_i8r(imm.m_value, dest);
    }

    void urshift64(RegisterID src, RegisterID dest)
    {
        if (src == X86Registers::ecx)
            m_assembler.shrq_CLr(dest);
        else {
            ASSERT(src != dest);

            // Variable shifts can only take their count from ecx, so swap it in if we see anything else.
            swap(src, X86Registers::ecx);
            m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest);
            swap(src, X86Registers::ecx);
        }
    }

    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
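    // The disp32 patch form above reserves a full 32-bit displacement; the compact
    // form only an 8-bit one, so a later-patched offset must fit the width that
    // was reserved at emission time.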

    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store64(src, scratchRegister);
        }
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        move(imm, scratchRegister);
        store64(scratchRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister);
        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

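    // For (Not)Equal comparisons against zero, compare64/branch64 below use
    // `test reg, reg`, which sets ZF exactly as `cmp $0, reg` would, with a
    // shorter encoding.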
    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return branch64(cond, Address(scratchRegister), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // A mask confined to the low seven bits can be tested with a byte-wide
        // testb; bit 7 is excluded so that sign-based conditions see the same
        // flags a full-width test would produce.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister);
        return branchTest64(cond, scratchRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
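    // In the memory forms above, an all-ones mask is tested with `cmpq $0, mem`:
    // the resulting flags match a full-width test, and the zero fits in a
    // sign-extended imm8, avoiding testq's 32-bit immediate.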

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }
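    // movq_i64r always emits the full ten-byte form (REX.W prefix, B8+rd opcode,
    // imm64), so the eight immediate bytes can later be repatched in place
    // regardless of the value being loaded.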

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        store64(scratchRegister, address);
        return label;
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
    }

    static bool supportsFloatingPoint() { return true; }
    // See the comment on MacroAssemblerARMv7::supportsFloatingPointTruncate().
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
    }

    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

private:
    template <typename, template <typename> class> friend class LinkBufferBase;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }
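    // Near calls are emitted as direct rel32 calls and are linked as such; all
    // other calls go through the movq/call-through-r11 sequence, so linking just
    // rewrites the 64-bit immediate REPATCH_OFFSET_CALL_R11 bytes before the
    // call's label.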

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)

#endif // MacroAssemblerX86_64_h