/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

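// The helpers below repatch a call site in already-generated JIT code,
// identified by the return address it leaves on the stack, so that it targets
// a different trampoline or stub function. This is how the JIT swaps a
// generic slow-path call for a specialized one after the fact.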
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

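// m_labels is sized to the full instruction stream so that any bytecode offset
// can serve as a jump target. m_bytecodeIndex starts out as (unsigned)-1, a
// sentinel that lets ASSERTs catch uses outside of an active compile pass.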
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
    , m_bytecodeIndex((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeIndex((unsigned)-1)
    , m_mappedVirtualRegisterIndex((unsigned)-1)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
{
}

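// emitTimeoutCheck decrements timeoutCheckRegister and, only when the counter
// reaches zero, calls the cti_timeout_check stub; the stub is expected to test
// whether script execution has exceeded its time limit, and its return value
// (a fresh counter) is stored back into timeoutCheckRegister. On JSVALUE32_64
// the cached result registers must be saved across the stub call and reloaded.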
#if USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif

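// The macros below build the big opcode switches in privateCompileMainPass and
// privateCompileSlowCases. NEXT_OPCODE advances m_bytecodeIndex by the length
// of the instruction just handled; DEFINE_BINARY_OP and DEFINE_UNARY_OP route
// an opcode straight to its interpreter stub, while DEFINE_OP and
// DEFINE_SLOWCASE_OP dispatch to per-opcode emitter methods. For example,
// DEFINE_OP(op_mov) expands to:
//
//     case op_mov: {
//         emit_op_mov(currentInstruction);
//         m_bytecodeIndex += OPCODE_LENGTH(op_mov);
//         break;
//     }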
#define NEXT_OPCODE(name) \
    m_bytecodeIndex += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

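// The main pass walks the bytecode stream linearly, records a label for every
// instruction (so jumps can be linked later), and emits fast-path machine code
// for each opcode via the emit_* methods. Opcodes that should never reach the
// JIT (the specialized get/put variants substituted at run time) fall through
// to ASSERT_NOT_REACHED.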
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if !USE(JSVALUE32_64)
        if (m_labels[m_bytecodeIndex].isUsed())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeIndex] = label();

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
#if USE(JSVALUE32)
        DEFINE_BINARY_OP(op_div)
#endif
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_urshift)
        DEFINE_UNARY_OP(op_is_boolean)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_number)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_is_string)
        DEFINE_UNARY_OP(op_is_undefined)
#if !USE(JSVALUE32_64)
        DEFINE_UNARY_OP(op_negate)
#endif
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitnot)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_construct_verify)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_arguments)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
#if !USE(JSVALUE32)
        DEFINE_OP(op_div)
#endif
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_enter_with_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jsr)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_load_varargs)
        DEFINE_OP(op_loop)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_method_check)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_OP(op_negate)
#endif
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_error)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_put_setter)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_sret)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_proto_list:
        case op_get_by_id_self:
        case op_get_by_id_self_list:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeIndex = (unsigned)-1;
#endif
}

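// The link pass runs after the main pass, once every bytecode offset has a
// corresponding machine-code label, and binds each jump recorded in m_jmpTable
// to its target label.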
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
    m_jmpTable.clear();
}

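// The slow-case pass emits the out-of-line fallback code for every slow-path
// jump recorded during the main pass. All slow cases for one instruction are
// handled together, and each group ends with emitJumpSlowToHot, which jumps
// back to the fast path of the following instruction.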
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
#if USE(JSVALUE32_64)
    m_globalResolveInfoIndex = 0;
#endif
    m_callLinkInfoIndex = 0;

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if !USE(JSVALUE32_64)
        killLastResultRegister();
#endif

        m_bytecodeIndex = iter->to;
#ifndef NDEBUG
        unsigned firstTo = m_bytecodeIndex;
#endif
        Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitnot)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_construct_verify)
        DEFINE_SLOWCASE_OP(op_convert_this)
#if !USE(JSVALUE32)
        DEFINE_SLOWCASE_OP(op_div)
#endif
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_negate)
#endif
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_resolve_global)
#endif
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        emitJumpSlowToHot(jump(), 0);
    }

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
#endif
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeIndex = (unsigned)-1;
#endif
}

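// privateCompile drives the whole compilation: it emits the register-file
// growth check for function code, runs the three passes above, and then uses a
// LinkBuffer to copy the generated code into executable memory, resolving
// switch tables, exception handlers, stub calls, and patchable call sites
// against their final addresses.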
JITCode JIT::privateCompile()
{
    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    // Could use a pop_m, but would need to offset the following instruction if so.
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);

    Jump slowRegisterFileCheck;
    Label afterRegisterFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        // In the case of a fast linked call, we do not set this up in the caller.
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof(void*));
        addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);

        slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
        afterRegisterFileCheck = label();
    }

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_codeBlock->codeType() == FunctionCode) {
        slowRegisterFileCheck.link(this);
        m_bytecodeIndex = 0;
        JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
        m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(afterRegisterFileCheck);
    }

    ASSERT(m_jmpTable.isEmpty());

    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeIndex = record.bytecodeIndex;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    if (m_codeBlock->hasExceptionInfo()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
    }

    // Link absolute addresses for jsr.
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
    }
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.ownerCodeBlock = m_codeBlock;
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
#endif
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }

    return patchBuffer.finalizeCode();
}

#if !USE(JSVALUE32_64)
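// JSVariableObject keeps its registers behind two levels of indirection
// (object -> d -> registers), so reading or writing a scoped variable takes
// two pointer loads before the indexed access.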
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
    loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
    loadPtr(Address(dst, index * sizeof(Register)), dst);
}

void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
{
    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
    storePtr(src, Address(variableObject, index * sizeof(Register)));
}
#endif

#if ENABLE(JIT_OPTIMIZE_CALL)
void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
{
    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
    // match). Reset the check so it no longer matches.
    RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
#if USE(JSVALUE32_64)
    repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
#else
    repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
#endif
}

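// linkCall binds a call site directly to a compiled callee. The fast link is
// made only when the caller's argument count matches the callee's parameter
// count (or the callee is native and has no CodeBlock); in every case the
// slow-path call is repointed at ctiVirtualCall, which handles the general
// case, including arity mismatches, at run time.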
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());

        if (calleeCodeBlock)
            calleeCodeBlock->addCaller(callLinkInfo);

        repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
    }

    // Patch the call so we do not continue to try to link.
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
}
#endif // ENABLE(JIT_OPTIMIZE_CALL)

} // namespace JSC

#endif // ENABLE(JIT)