/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 | |
26 | #ifndef MacroAssemblerX86_64_h |
27 | #define MacroAssemblerX86_64_h |
28 | |
29 | #include <wtf/Platform.h> |
30 | |
31 | #if ENABLE(ASSEMBLER) && CPU(X86_64) |
32 | |
33 | #include "MacroAssemblerX86Common.h" |
34 | |
35 | #define REPTACH_OFFSET_CALL_R11 3 |
36 | |
37 | namespace JSC { |
38 | |
39 | class MacroAssemblerX86_64 : public MacroAssemblerX86Common { |
40 | protected: |
41 | static const X86Registers::RegisterID scratchRegister = X86Registers::r11; |
42 | |
43 | public: |
44 | static const Scale ScalePtr = TimesEight; |
45 | |
46 | using MacroAssemblerX86Common::add32; |
47 | using MacroAssemblerX86Common::and32; |
48 | using MacroAssemblerX86Common::or32; |
49 | using MacroAssemblerX86Common::sub32; |
50 | using MacroAssemblerX86Common::load32; |
51 | using MacroAssemblerX86Common::store32; |
52 | using MacroAssemblerX86Common::call; |
53 | using MacroAssemblerX86Common::loadDouble; |
54 | using MacroAssemblerX86Common::convertInt32ToDouble; |
55 | |
56 | void add32(Imm32 imm, AbsoluteAddress address) |
57 | { |
58 | move(imm: ImmPtr(address.m_ptr), dest: scratchRegister); |
59 | add32(imm, address: Address(scratchRegister)); |
60 | } |
61 | |
62 | void and32(Imm32 imm, AbsoluteAddress address) |
63 | { |
64 | move(imm: ImmPtr(address.m_ptr), dest: scratchRegister); |
65 | and32(imm, address: Address(scratchRegister)); |
66 | } |
67 | |
68 | void or32(Imm32 imm, AbsoluteAddress address) |
69 | { |
70 | move(imm: ImmPtr(address.m_ptr), dest: scratchRegister); |
71 | or32(imm, address: Address(scratchRegister)); |
72 | } |
73 | |
74 | void sub32(Imm32 imm, AbsoluteAddress address) |
75 | { |
76 | move(imm: ImmPtr(address.m_ptr), dest: scratchRegister); |
77 | sub32(imm, address: Address(scratchRegister)); |
78 | } |
79 | |
80 | void load32(void* address, RegisterID dest) |
81 | { |
82 | if (dest == X86Registers::eax) |
83 | m_assembler.movl_mEAX(addr: address); |
84 | else { |
85 | move(src: X86Registers::eax, dest); |
86 | m_assembler.movl_mEAX(addr: address); |
87 | swap(reg1: X86Registers::eax, reg2: dest); |
88 | } |
89 | } |
90 | |
91 | void loadDouble(void* address, FPRegisterID dest) |
92 | { |
93 | move(imm: ImmPtr(address), dest: scratchRegister); |
94 | loadDouble(address: scratchRegister, dest); |
95 | } |
96 | |
97 | void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) |
98 | { |
99 | move(imm: Imm32(*static_cast<int32_t*>(src.m_ptr)), dest: scratchRegister); |
100 | m_assembler.cvtsi2sd_rr(src: scratchRegister, dst: dest); |
101 | } |
102 | |
103 | void store32(Imm32 imm, void* address) |
104 | { |
105 | move(src: X86Registers::eax, dest: scratchRegister); |
106 | move(imm, dest: X86Registers::eax); |
107 | m_assembler.movl_EAXm(addr: address); |
108 | move(src: scratchRegister, dest: X86Registers::eax); |
109 | } |
110 | |
111 | Call call() |
112 | { |
113 | DataLabelPtr label = moveWithPatch(initialValue: ImmPtr(0), dest: scratchRegister); |
114 | Call result = Call(m_assembler.call(dst: scratchRegister), Call::Linkable); |
115 | ASSERT(differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11); |
116 | return result; |
117 | } |
118 | |
119 | Call tailRecursiveCall() |
120 | { |
121 | DataLabelPtr label = moveWithPatch(initialValue: ImmPtr(0), dest: scratchRegister); |
122 | Jump newJump = Jump(m_assembler.jmp_r(dst: scratchRegister)); |
123 | ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); |
124 | return Call::fromTailJump(jump: newJump); |
125 | } |
126 | |
127 | Call makeTailRecursiveCall(Jump oldJump) |
128 | { |
129 | oldJump.link(masm: this); |
130 | DataLabelPtr label = moveWithPatch(initialValue: ImmPtr(0), dest: scratchRegister); |
131 | Jump newJump = Jump(m_assembler.jmp_r(dst: scratchRegister)); |
132 | ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); |
133 | return Call::fromTailJump(jump: newJump); |
134 | } |
135 | |
136 | |
137 | void addPtr(RegisterID src, RegisterID dest) |
138 | { |
139 | m_assembler.addq_rr(src, dst: dest); |
140 | } |
141 | |
142 | void addPtr(Imm32 imm, RegisterID srcDest) |
143 | { |
144 | m_assembler.addq_ir(imm: imm.m_value, dst: srcDest); |
145 | } |
146 | |
147 | void addPtr(ImmPtr imm, RegisterID dest) |
148 | { |
149 | move(imm, dest: scratchRegister); |
150 | m_assembler.addq_rr(src: scratchRegister, dst: dest); |
151 | } |
152 | |
153 | void addPtr(Imm32 imm, RegisterID src, RegisterID dest) |
154 | { |
155 | m_assembler.leaq_mr(offset: imm.m_value, base: src, dst: dest); |
156 | } |
157 | |
158 | void addPtr(Imm32 imm, Address address) |
159 | { |
160 | m_assembler.addq_im(imm: imm.m_value, offset: address.offset, base: address.base); |
161 | } |
162 | |
163 | void addPtr(Imm32 imm, AbsoluteAddress address) |
164 | { |
165 | move(imm: ImmPtr(address.m_ptr), dest: scratchRegister); |
166 | addPtr(imm, address: Address(scratchRegister)); |
167 | } |
168 | |
169 | void andPtr(RegisterID src, RegisterID dest) |
170 | { |
171 | m_assembler.andq_rr(src, dst: dest); |
172 | } |
173 | |
174 | void andPtr(Imm32 imm, RegisterID srcDest) |
175 | { |
176 | m_assembler.andq_ir(imm: imm.m_value, dst: srcDest); |
177 | } |
178 | |
179 | void orPtr(RegisterID src, RegisterID dest) |
180 | { |
181 | m_assembler.orq_rr(src, dst: dest); |
182 | } |
183 | |
184 | void orPtr(ImmPtr imm, RegisterID dest) |
185 | { |
186 | move(imm, dest: scratchRegister); |
187 | m_assembler.orq_rr(src: scratchRegister, dst: dest); |
188 | } |
189 | |
190 | void orPtr(Imm32 imm, RegisterID dest) |
191 | { |
192 | m_assembler.orq_ir(imm: imm.m_value, dst: dest); |
193 | } |
194 | |
195 | void subPtr(RegisterID src, RegisterID dest) |
196 | { |
197 | m_assembler.subq_rr(src, dst: dest); |
198 | } |
199 | |
200 | void subPtr(Imm32 imm, RegisterID dest) |
201 | { |
202 | m_assembler.subq_ir(imm: imm.m_value, dst: dest); |
203 | } |
204 | |
205 | void subPtr(ImmPtr imm, RegisterID dest) |
206 | { |
207 | move(imm, dest: scratchRegister); |
208 | m_assembler.subq_rr(src: scratchRegister, dst: dest); |
209 | } |
210 | |
211 | void xorPtr(RegisterID src, RegisterID dest) |
212 | { |
213 | m_assembler.xorq_rr(src, dst: dest); |
214 | } |
215 | |
216 | void xorPtr(Imm32 imm, RegisterID srcDest) |
217 | { |
218 | m_assembler.xorq_ir(imm: imm.m_value, dst: srcDest); |
219 | } |
220 | |
221 | |
222 | void loadPtr(ImplicitAddress address, RegisterID dest) |
223 | { |
224 | m_assembler.movq_mr(offset: address.offset, base: address.base, dst: dest); |
225 | } |
226 | |
227 | void loadPtr(BaseIndex address, RegisterID dest) |
228 | { |
229 | m_assembler.movq_mr(offset: address.offset, base: address.base, index: address.index, scale: address.scale, dst: dest); |
230 | } |
231 | |
232 | void loadPtr(void* address, RegisterID dest) |
233 | { |
234 | if (dest == X86Registers::eax) |
235 | m_assembler.movq_mEAX(addr: address); |
236 | else { |
237 | move(src: X86Registers::eax, dest); |
238 | m_assembler.movq_mEAX(addr: address); |
239 | swap(reg1: X86Registers::eax, reg2: dest); |
240 | } |
241 | } |
242 | |
243 | DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) |
244 | { |
245 | m_assembler.movq_mr_disp32(offset: address.offset, base: address.base, dst: dest); |
246 | return DataLabel32(this); |
247 | } |
248 | |
249 | void storePtr(RegisterID src, ImplicitAddress address) |
250 | { |
251 | m_assembler.movq_rm(src, offset: address.offset, base: address.base); |
252 | } |
253 | |
254 | void storePtr(RegisterID src, BaseIndex address) |
255 | { |
256 | m_assembler.movq_rm(src, offset: address.offset, base: address.base, index: address.index, scale: address.scale); |
257 | } |
258 | |
259 | void storePtr(RegisterID src, void* address) |
260 | { |
261 | if (src == X86Registers::eax) |
262 | m_assembler.movq_EAXm(addr: address); |
263 | else { |
264 | swap(reg1: X86Registers::eax, reg2: src); |
265 | m_assembler.movq_EAXm(addr: address); |
266 | swap(reg1: X86Registers::eax, reg2: src); |
267 | } |
268 | } |
269 | |
270 | void storePtr(ImmPtr imm, ImplicitAddress address) |
271 | { |
272 | move(imm, dest: scratchRegister); |
273 | storePtr(src: scratchRegister, address); |
274 | } |
275 | |
276 | DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) |
277 | { |
278 | m_assembler.movq_rm_disp32(src, offset: address.offset, base: address.base); |
279 | return DataLabel32(this); |
280 | } |
281 | |
282 | void movePtrToDouble(RegisterID src, FPRegisterID dest) |
283 | { |
284 | m_assembler.movq_rr(src, dst: dest); |
285 | } |
286 | |
287 | void moveDoubleToPtr(FPRegisterID src, RegisterID dest) |
288 | { |
289 | m_assembler.movq_rr(src, dst: dest); |
290 | } |
291 | |
292 | void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest) |
293 | { |
294 | if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) |
295 | m_assembler.testq_rr(src: left, dst: left); |
296 | else |
297 | m_assembler.cmpq_ir(imm: right.m_value, dst: left); |
298 | m_assembler.setCC_r(cond: x86Condition(cond), dst: dest); |
299 | m_assembler.movzbl_rr(src: dest, dst: dest); |
300 | } |
301 | |
302 | Jump branchPtr(Condition cond, RegisterID left, RegisterID right) |
303 | { |
304 | m_assembler.cmpq_rr(src: right, dst: left); |
305 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
306 | } |
307 | |
308 | Jump branchPtr(Condition cond, RegisterID left, ImmPtr right) |
309 | { |
310 | move(imm: right, dest: scratchRegister); |
311 | return branchPtr(cond, left, right: scratchRegister); |
312 | } |
313 | |
314 | Jump branchPtr(Condition cond, RegisterID left, Address right) |
315 | { |
316 | m_assembler.cmpq_mr(offset: right.offset, base: right.base, src: left); |
317 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
318 | } |
319 | |
320 | Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right) |
321 | { |
322 | move(imm: ImmPtr(left.m_ptr), dest: scratchRegister); |
323 | return branchPtr(cond, left: Address(scratchRegister), right); |
324 | } |
325 | |
326 | Jump branchPtr(Condition cond, Address left, RegisterID right) |
327 | { |
328 | m_assembler.cmpq_rm(src: right, offset: left.offset, base: left.base); |
329 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
330 | } |
331 | |
332 | Jump branchPtr(Condition cond, Address left, ImmPtr right) |
333 | { |
334 | move(imm: right, dest: scratchRegister); |
335 | return branchPtr(cond, left, right: scratchRegister); |
336 | } |
337 | |
338 | Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask) |
339 | { |
340 | m_assembler.testq_rr(src: reg, dst: mask); |
341 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
342 | } |
343 | |
344 | Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) |
345 | { |
346 | // if we are only interested in the low seven bits, this can be tested with a testb |
347 | if (mask.m_value == -1) |
348 | m_assembler.testq_rr(src: reg, dst: reg); |
349 | else if ((mask.m_value & ~0x7f) == 0) |
350 | m_assembler.testb_i8r(imm: mask.m_value, dst: reg); |
351 | else |
352 | m_assembler.testq_i32r(imm: mask.m_value, dst: reg); |
353 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
354 | } |
355 | |
356 | Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1)) |
357 | { |
358 | if (mask.m_value == -1) |
359 | m_assembler.cmpq_im(imm: 0, offset: address.offset, base: address.base); |
360 | else |
361 | m_assembler.testq_i32m(imm: mask.m_value, offset: address.offset, base: address.base); |
362 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
363 | } |
364 | |
365 | Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) |
366 | { |
367 | if (mask.m_value == -1) |
368 | m_assembler.cmpq_im(imm: 0, offset: address.offset, base: address.base, index: address.index, scale: address.scale); |
369 | else |
370 | m_assembler.testq_i32m(imm: mask.m_value, offset: address.offset, base: address.base, index: address.index, scale: address.scale); |
371 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
372 | } |
373 | |
374 | |
375 | Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest) |
376 | { |
377 | ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); |
378 | addPtr(src, dest); |
379 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
380 | } |
381 | |
382 | Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest) |
383 | { |
384 | ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); |
385 | subPtr(imm, dest); |
386 | return Jump(m_assembler.jCC(cond: x86Condition(cond))); |
387 | } |
388 | |
389 | DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest) |
390 | { |
391 | m_assembler.movq_i64r(imm: initialValue.asIntptr(), dst: dest); |
392 | return DataLabelPtr(this); |
393 | } |
394 | |
395 | Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) |
396 | { |
397 | dataLabel = moveWithPatch(initialValue: initialRightValue, dest: scratchRegister); |
398 | return branchPtr(cond, left, right: scratchRegister); |
399 | } |
400 | |
401 | Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) |
402 | { |
403 | dataLabel = moveWithPatch(initialValue: initialRightValue, dest: scratchRegister); |
404 | return branchPtr(cond, left, right: scratchRegister); |
405 | } |
406 | |
407 | DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address) |
408 | { |
409 | DataLabelPtr label = moveWithPatch(initialValue, dest: scratchRegister); |
410 | storePtr(src: scratchRegister, address); |
411 | return label; |
412 | } |
413 | |
414 | Label loadPtrWithPatchToLEA(Address address, RegisterID dest) |
415 | { |
416 | Label label(this); |
417 | loadPtr(address, dest); |
418 | return label; |
419 | } |
420 | |
421 | bool supportsFloatingPoint() const { return true; } |
422 | // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate() |
423 | bool supportsFloatingPointTruncate() const { return true; } |
424 | |
425 | private: |
426 | friend class LinkBuffer; |
427 | friend class RepatchBuffer; |
428 | |
429 | static void linkCall(void* code, Call call, FunctionPtr function) |
430 | { |
431 | if (!call.isFlagSet(flag: Call::Near)) |
432 | X86Assembler::linkPointer(code, where: X86Assembler::labelFor(jump: call.m_jmp, offset: -REPTACH_OFFSET_CALL_R11), value: function.value()); |
433 | else |
434 | X86Assembler::linkCall(code, from: call.m_jmp, to: function.value()); |
435 | } |
436 | |
437 | static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) |
438 | { |
439 | X86Assembler::repatchPointer(where: call.dataLabelPtrAtOffset(offset: -REPTACH_OFFSET_CALL_R11).dataLocation(), value: destination.executableAddress()); |
440 | } |
441 | |
442 | static void repatchCall(CodeLocationCall call, FunctionPtr destination) |
443 | { |
444 | X86Assembler::repatchPointer(where: call.dataLabelPtrAtOffset(offset: -REPTACH_OFFSET_CALL_R11).dataLocation(), value: destination.executableAddress()); |
445 | } |
446 | |
447 | }; |
448 | |
449 | } // namespace JSC |
450 | |
451 | #endif // ENABLE(ASSEMBLER) |
452 | |
453 | #endif // MacroAssemblerX86_64_h |
454 | |