1 | /* |
2 | * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "JIT.h" |
28 | |
29 | #if ENABLE(JIT) |
30 | |
31 | #include "CodeBlock.h" |
32 | #include "JITInlineMethods.h" |
33 | #include "JITStubCall.h" |
34 | #include "JSArray.h" |
35 | #include "JSFunction.h" |
36 | #include "JSPropertyNameIterator.h" |
37 | #include "Interpreter.h" |
38 | #include "LinkBuffer.h" |
39 | #include "RepatchBuffer.h" |
40 | #include "ResultType.h" |
41 | #include "SamplingTool.h" |
42 | |
43 | #ifndef NDEBUG |
44 | #include <stdio.h> |
45 | #endif |
46 | |
47 | using namespace std; |
48 | |
49 | namespace JSC { |
50 | |
51 | #if USE(JSVALUE32_64) |
52 | |
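// In the JSVALUE32_64 representation a JSValue occupies two machine words: a 32-bit
// tag and a 32-bit payload. Values travel through this code in register pairs, tag
// first - conventionally the tag in regT1 (or regT3) and the payload in regT0 (or
// regT2), matching the (tag, payload) argument order of emitLoad and emitStore.
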
53 | void JIT::emit_op_put_by_index(Instruction* currentInstruction) |
54 | { |
55 | unsigned base = currentInstruction[1].u.operand; |
56 | unsigned property = currentInstruction[2].u.operand; |
57 | unsigned value = currentInstruction[3].u.operand; |
58 | |
59 | JITStubCall stubCall(this, cti_op_put_by_index); |
60 | stubCall.addArgument(base); |
61 | stubCall.addArgument(Imm32(property)); |
62 | stubCall.addArgument(value); |
63 | stubCall.call(); |
64 | } |
65 | |
66 | void JIT::emit_op_put_getter(Instruction* currentInstruction) |
67 | { |
68 | unsigned base = currentInstruction[1].u.operand; |
69 | unsigned property = currentInstruction[2].u.operand; |
70 | unsigned function = currentInstruction[3].u.operand; |
71 | |
72 | JITStubCall stubCall(this, cti_op_put_getter); |
73 | stubCall.addArgument(base); |
74 | stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); |
75 | stubCall.addArgument(function); |
76 | stubCall.call(); |
77 | } |
78 | |
79 | void JIT::emit_op_put_setter(Instruction* currentInstruction) |
80 | { |
81 | unsigned base = currentInstruction[1].u.operand; |
82 | unsigned property = currentInstruction[2].u.operand; |
83 | unsigned function = currentInstruction[3].u.operand; |
84 | |
85 | JITStubCall stubCall(this, cti_op_put_setter); |
86 | stubCall.addArgument(base); |
87 | stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); |
88 | stubCall.addArgument(function); |
89 | stubCall.call(); |
90 | } |
91 | |
92 | void JIT::emit_op_del_by_id(Instruction* currentInstruction) |
93 | { |
94 | unsigned dst = currentInstruction[1].u.operand; |
95 | unsigned base = currentInstruction[2].u.operand; |
96 | unsigned property = currentInstruction[3].u.operand; |
97 | |
98 | JITStubCall stubCall(this, cti_op_del_by_id); |
99 | stubCall.addArgument(base); |
100 | stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); |
101 | stubCall.call(dst); |
102 | } |
103 | |
104 | |
105 | #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
106 | |
107 | /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
108 | |
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
110 | void JIT::emit_op_method_check(Instruction*) {} |
111 | void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); } |
112 | #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) |
113 | #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS" |
114 | #endif |
115 | |
116 | void JIT::emit_op_get_by_val(Instruction* currentInstruction) |
117 | { |
118 | unsigned dst = currentInstruction[1].u.operand; |
119 | unsigned base = currentInstruction[2].u.operand; |
120 | unsigned property = currentInstruction[3].u.operand; |
121 | |
122 | JITStubCall stubCall(this, cti_op_get_by_val); |
123 | stubCall.addArgument(base); |
124 | stubCall.addArgument(property); |
125 | stubCall.call(dst); |
126 | } |
127 | |
128 | void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&) |
129 | { |
130 | ASSERT_NOT_REACHED(); |
131 | } |
132 | |
133 | void JIT::emit_op_put_by_val(Instruction* currentInstruction) |
134 | { |
135 | unsigned base = currentInstruction[1].u.operand; |
136 | unsigned property = currentInstruction[2].u.operand; |
137 | unsigned value = currentInstruction[3].u.operand; |
138 | |
139 | JITStubCall stubCall(this, cti_op_put_by_val); |
140 | stubCall.addArgument(base); |
141 | stubCall.addArgument(property); |
142 | stubCall.addArgument(value); |
143 | stubCall.call(); |
144 | } |
145 | |
146 | void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&) |
147 | { |
148 | ASSERT_NOT_REACHED(); |
149 | } |
150 | |
151 | void JIT::emit_op_get_by_id(Instruction* currentInstruction) |
152 | { |
153 | int dst = currentInstruction[1].u.operand; |
154 | int base = currentInstruction[2].u.operand; |
155 | int ident = currentInstruction[3].u.operand; |
156 | |
157 | JITStubCall stubCall(this, cti_op_get_by_id_generic); |
158 | stubCall.addArgument(base); |
159 | stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); |
160 | stubCall.call(dst); |
161 | |
162 | m_propertyAccessInstructionIndex++; |
163 | } |
164 | |
165 | void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&) |
166 | { |
167 | m_propertyAccessInstructionIndex++; |
168 | ASSERT_NOT_REACHED(); |
169 | } |
170 | |
171 | void JIT::emit_op_put_by_id(Instruction* currentInstruction) |
172 | { |
173 | int base = currentInstruction[1].u.operand; |
174 | int ident = currentInstruction[2].u.operand; |
175 | int value = currentInstruction[3].u.operand; |
176 | |
177 | JITStubCall stubCall(this, cti_op_put_by_id_generic); |
178 | stubCall.addArgument(base); |
179 | stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); |
180 | stubCall.addArgument(value); |
181 | stubCall.call(); |
182 | |
183 | m_propertyAccessInstructionIndex++; |
184 | } |
185 | |
186 | void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&) |
187 | { |
188 | m_propertyAccessInstructionIndex++; |
189 | ASSERT_NOT_REACHED(); |
190 | } |
191 | |
192 | #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
193 | |
194 | /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
195 | |
196 | #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) |
197 | |
198 | void JIT::emit_op_method_check(Instruction* currentInstruction) |
199 | { |
200 | // Assert that the following instruction is a get_by_id. |
201 | ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id); |
202 | |
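    // Schematically, the bytecode for a method call "o.f(...)" arrives as the pair
    //     op_method_check
    //     op_get_by_id dst, base, f
    // (operand names illustrative). This opcode compiles both; the main compile
    // loop is told to skip the following get_by_id at the end of this function.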
203 | currentInstruction += OPCODE_LENGTH(op_method_check); |
204 | |
205 | // Do the method check - check the object & its prototype's structure inline (this is the common case). |
206 | m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex)); |
207 | MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); |
208 | |
209 | int dst = currentInstruction[1].u.operand; |
210 | int base = currentInstruction[2].u.operand; |
211 | |
212 | emitLoad(base, regT1, regT0); |
213 | emitJumpSlowCaseIfNotJSCell(base, regT1); |
214 | |
215 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); |
216 | |
217 | Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); |
218 | DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2); |
219 | Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); |
220 | |
    // This immediate will be repatched to the cached function, so the function is materialized without doing a load.
222 | DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0); |
223 | |
224 | END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); |
225 | |
226 | move(Imm32(JSValue::CellTag), regT1); |
227 | Jump match = jump(); |
228 | |
229 | ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj); |
230 | ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct); |
231 | ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction); |
232 | |
233 | // Link the failure cases here. |
234 | structureCheck.link(this); |
235 | protoStructureCheck.link(this); |
236 | |
    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
239 | compileGetByIdHotPath(); |
240 | |
241 | match.link(this); |
242 | emitStore(dst, regT1, regT0); |
243 | map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0); |
244 | |
245 | // We've already generated the following get_by_id, so make sure it's skipped over. |
246 | m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); |
247 | } |
248 | |
249 | void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
250 | { |
251 | currentInstruction += OPCODE_LENGTH(op_method_check); |
252 | |
253 | int dst = currentInstruction[1].u.operand; |
254 | int base = currentInstruction[2].u.operand; |
255 | int ident = currentInstruction[3].u.operand; |
256 | |
257 | compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true); |
258 | |
259 | // We've already generated the following get_by_id, so make sure it's skipped over. |
260 | m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); |
261 | } |
262 | |
#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
264 | |
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
266 | void JIT::emit_op_method_check(Instruction*) {} |
267 | void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); } |
268 | |
269 | #endif |
270 | |
271 | void JIT::emit_op_get_by_val(Instruction* currentInstruction) |
272 | { |
273 | unsigned dst = currentInstruction[1].u.operand; |
274 | unsigned base = currentInstruction[2].u.operand; |
275 | unsigned property = currentInstruction[3].u.operand; |
276 | |
277 | emitLoad2(base, regT1, regT0, property, regT3, regT2); |
278 | |
279 | addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag))); |
280 | emitJumpSlowCaseIfNotJSCell(base, regT1); |
281 | addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); |
282 | |
283 | loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3); |
284 | addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength)))); |
285 | |
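    // Each vector slot is an 8-byte JSValue - payload in the low word, tag in the
    // high word on the little-endian layout this path assumes - hence the TimesEight
    // scaling and the +4 displacement for the tag.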
286 | load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag |
287 | load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload |
288 | addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag))); |
289 | |
290 | emitStore(dst, regT1, regT0); |
291 | map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0); |
292 | } |
293 | |
294 | void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
295 | { |
296 | unsigned dst = currentInstruction[1].u.operand; |
297 | unsigned base = currentInstruction[2].u.operand; |
298 | unsigned property = currentInstruction[3].u.operand; |
299 | |
300 | linkSlowCase(iter); // property int32 check |
301 | linkSlowCaseIfNotJSCell(iter, base); // base cell check |
302 | linkSlowCase(iter); // base array check |
303 | linkSlowCase(iter); // vector length check |
304 | linkSlowCase(iter); // empty value |
305 | |
306 | JITStubCall stubCall(this, cti_op_get_by_val); |
307 | stubCall.addArgument(base); |
308 | stubCall.addArgument(property); |
309 | stubCall.call(dst); |
310 | } |
311 | |
312 | void JIT::emit_op_put_by_val(Instruction* currentInstruction) |
313 | { |
314 | unsigned base = currentInstruction[1].u.operand; |
315 | unsigned property = currentInstruction[2].u.operand; |
316 | unsigned value = currentInstruction[3].u.operand; |
317 | |
318 | emitLoad2(base, regT1, regT0, property, regT3, regT2); |
319 | |
320 | addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag))); |
321 | emitJumpSlowCaseIfNotJSCell(base, regT1); |
322 | addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); |
323 | addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength)))); |
324 | |
325 | loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3); |
326 | |
327 | Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag)); |
328 | |
329 | Label storeResult(this); |
330 | emitLoad(value, regT1, regT0); |
331 | store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload |
332 | store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag |
333 | Jump end = jump(); |
334 | |
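    // The slot was a hole: count the new element in m_numValuesInVector, and if the
    // index is beyond the current length, grow m_length to index + 1 before storing.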
335 | empty.link(this); |
336 | add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); |
337 | branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this); |
338 | |
339 | add32(Imm32(1), regT2, regT0); |
340 | store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))); |
341 | jump().linkTo(storeResult, this); |
342 | |
343 | end.link(this); |
344 | } |
345 | |
346 | void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
347 | { |
348 | unsigned base = currentInstruction[1].u.operand; |
349 | unsigned property = currentInstruction[2].u.operand; |
350 | unsigned value = currentInstruction[3].u.operand; |
351 | |
352 | linkSlowCase(iter); // property int32 check |
353 | linkSlowCaseIfNotJSCell(iter, base); // base cell check |
354 | linkSlowCase(iter); // base not array check |
355 | linkSlowCase(iter); // in vector check |
356 | |
357 | JITStubCall stubPutByValCall(this, cti_op_put_by_val); |
358 | stubPutByValCall.addArgument(base); |
359 | stubPutByValCall.addArgument(property); |
360 | stubPutByValCall.addArgument(value); |
361 | stubPutByValCall.call(); |
362 | } |
363 | |
364 | void JIT::emit_op_get_by_id(Instruction* currentInstruction) |
365 | { |
366 | int dst = currentInstruction[1].u.operand; |
367 | int base = currentInstruction[2].u.operand; |
368 | |
369 | emitLoad(base, regT1, regT0); |
370 | emitJumpSlowCaseIfNotJSCell(base, regT1); |
371 | compileGetByIdHotPath(); |
372 | emitStore(dst, regT1, regT0); |
373 | map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0); |
374 | } |
375 | |
376 | void JIT::compileGetByIdHotPath() |
377 | { |
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also need the property-map access offset, as a
    // label to jump back to if one of these trampolines finds a match.
382 | |
383 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); |
384 | |
385 | Label hotPathBegin(this); |
386 | m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; |
387 | m_propertyAccessInstructionIndex++; |
388 | |
389 | DataLabelPtr structureToCompare; |
390 | Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); |
391 | addSlowCase(structureCheck); |
392 | ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure); |
393 | ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase); |
394 | |
395 | Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2); |
396 | Label externalLoadComplete(this); |
397 | ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad); |
398 | ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad); |
399 | |
400 | DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload |
401 | ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1); |
402 | DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag |
403 | ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2); |
404 | |
405 | Label putResult(this); |
406 | ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult); |
407 | |
408 | END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); |
409 | } |
410 | |
411 | void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
412 | { |
413 | int dst = currentInstruction[1].u.operand; |
414 | int base = currentInstruction[2].u.operand; |
415 | int ident = currentInstruction[3].u.operand; |
416 | |
417 | compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter); |
418 | } |
419 | |
420 | void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck) |
421 | { |
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
427 | linkSlowCaseIfNotJSCell(iter, base); |
428 | linkSlowCase(iter); |
429 | |
430 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); |
431 | |
432 | #ifndef NDEBUG |
433 | Label coldPathBegin(this); |
434 | #endif |
435 | JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id); |
436 | stubCall.addArgument(regT1, regT0); |
437 | stubCall.addArgument(ImmPtr(ident)); |
438 | Call call = stubCall.call(dst); |
439 | |
440 | END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); |
441 | |
442 | ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall); |
443 | |
444 | // Track the location of the call; this will be used to recover patch information. |
445 | m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call; |
446 | m_propertyAccessInstructionIndex++; |
447 | } |
448 | |
449 | void JIT::emit_op_put_by_id(Instruction* currentInstruction) |
450 | { |
    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
    // generate code such that the Structure & offset are always at the same distance from this.
454 | |
455 | int base = currentInstruction[1].u.operand; |
456 | int value = currentInstruction[3].u.operand; |
457 | |
458 | emitLoad2(base, regT1, regT0, value, regT3, regT2); |
459 | |
460 | emitJumpSlowCaseIfNotJSCell(base, regT1); |
461 | |
462 | BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById); |
463 | |
464 | Label hotPathBegin(this); |
465 | m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; |
466 | m_propertyAccessInstructionIndex++; |
467 | |
    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
469 | DataLabelPtr structureToCompare; |
470 | addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); |
471 | ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure); |
472 | |
    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
474 | Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0); |
475 | Label externalLoadComplete(this); |
476 | ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad); |
477 | ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad); |
478 | |
479 | DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload |
480 | DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag |
481 | |
482 | END_UNINTERRUPTED_SEQUENCE(sequencePutById); |
483 | |
484 | ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1); |
485 | ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2); |
486 | } |
487 | |
488 | void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
489 | { |
490 | int base = currentInstruction[1].u.operand; |
491 | int ident = currentInstruction[2].u.operand; |
492 | |
493 | linkSlowCaseIfNotJSCell(iter, base); |
494 | linkSlowCase(iter); |
495 | |
496 | JITStubCall stubCall(this, cti_op_put_by_id); |
497 | stubCall.addArgument(regT1, regT0); |
498 | stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); |
499 | stubCall.addArgument(regT3, regT2); |
500 | Call call = stubCall.call(); |
501 | |
502 | // Track the location of the call; this will be used to recover patch information. |
503 | m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call; |
504 | m_propertyAccessInstructionIndex++; |
505 | } |
506 | |
507 | // Compile a store into an object's property storage. May overwrite base. |
508 | void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset) |
509 | { |
510 | int offset = cachedOffset; |
511 | if (structure->isUsingInlineStorage()) |
512 | offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register); |
513 | else |
514 | loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); |
515 | emitStore(offset, valueTag, valuePayload, base); |
516 | } |
517 | |
518 | // Compile a load from an object's property storage. May overwrite base. |
519 | void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset) |
520 | { |
521 | int offset = cachedOffset; |
522 | if (structure->isUsingInlineStorage()) |
523 | offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register); |
524 | else |
525 | loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); |
526 | emitLoad(offset, resultTag, resultPayload, base); |
527 | } |
528 | |
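// Compile a load from a known object's property storage. Since the object pointer
// is a compile-time constant, inline storage can be addressed absolutely.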
529 | void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset) |
530 | { |
531 | if (base->isUsingInlineStorage()) { |
532 | load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload); |
533 | load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag); |
534 | return; |
535 | } |
536 | |
537 | size_t offset = cachedOffset * sizeof(JSValue); |
538 | |
539 | PropertyStorage* protoPropertyStorage = &base->m_externalStorage; |
540 | loadPtr(static_cast<void*>(protoPropertyStorage), temp); |
541 | load32(Address(temp, offset), resultPayload); |
542 | load32(Address(temp, offset + 4), resultTag); |
543 | } |
544 | |
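// Emit a check that the structure of this structure's prototype has not changed,
// pinning the expected structure as an immediate (nothing to check for a null prototype).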
545 | void JIT::testPrototype(Structure* structure, JumpList& failureCases) |
546 | { |
547 | if (structure->m_prototype.isNull()) |
548 | return; |
549 | |
550 | failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure))); |
551 | } |
552 | |
553 | void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress) |
554 | { |
555 | // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack. |
556 | |
557 | JumpList failureCases; |
558 | failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag))); |
559 | failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure))); |
560 | testPrototype(oldStructure, failureCases); |
561 | |
562 | // Verify that nothing in the prototype chain has a setter for this property. |
563 | for (RefPtr<Structure>* it = chain->head(); *it; ++it) |
564 | testPrototype(it->get(), failureCases); |
565 | |
566 | // Reallocate property storage if needed. |
567 | Call callTarget; |
568 | bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity(); |
569 | if (willNeedStorageRealloc) { |
        // This trampoline is called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
572 | preserveReturnAddressAfterCall(regT3); |
573 | |
574 | JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc); |
575 | stubCall.skipArgument(); // base |
576 | stubCall.skipArgument(); // ident |
577 | stubCall.skipArgument(); // value |
578 | stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity())); |
579 | stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity())); |
580 | stubCall.call(regT0); |
581 | |
582 | restoreReturnAddressBeforeReturn(regT3); |
583 | } |
584 | |
585 | sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount())); |
586 | add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount())); |
587 | storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); |
588 | |
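    // Reload the value to be stored - payload into regT3, tag into regT2 - from its
    // argument slot in the JIT stub call frame.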
589 | load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3); |
590 | load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2); |
591 | |
592 | // Write the value |
593 | compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset); |
594 | |
595 | ret(); |
596 | |
597 | ASSERT(!failureCases.empty()); |
598 | failureCases.link(this); |
599 | restoreArgumentReferenceForTrampoline(); |
600 | Call failureCall = tailRecursiveCall(); |
601 | |
602 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
603 | |
604 | patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail)); |
605 | |
606 | if (willNeedStorageRealloc) { |
607 | ASSERT(m_calls.size() == 1); |
608 | patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc)); |
609 | } |
610 | |
611 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
612 | stubInfo->stubRoutine = entryLabel; |
613 | RepatchBuffer repatchBuffer(m_codeBlock); |
614 | repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel); |
615 | } |
616 | |
617 | void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) |
618 | { |
619 | RepatchBuffer repatchBuffer(codeBlock); |
620 | |
621 | // We don't want to patch more than once - in future go to cti_op_get_by_id_generic. |
622 | // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now. |
623 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail)); |
624 | |
625 | int offset = sizeof(JSValue) * cachedOffset; |
626 | |
    // If we're patching to use inline storage, convert the initial load to a LEA; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
629 | if (structure->isUsingInlineStorage()) |
630 | repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad)); |
631 | |
    // Patch the offset into the property map to load from, then patch the Structure to look for.
633 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure); |
634 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload |
635 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag |
636 | } |
637 | |
638 | void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) |
639 | { |
640 | RepatchBuffer repatchBuffer(codeBlock); |
641 | |
642 | ASSERT(!methodCallLinkInfo.cachedStructure); |
643 | methodCallLinkInfo.cachedStructure = structure; |
644 | structure->ref(); |
645 | |
646 | Structure* prototypeStructure = proto->structure(); |
647 | ASSERT(!methodCallLinkInfo.cachedPrototypeStructure); |
648 | methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure; |
649 | prototypeStructure->ref(); |
650 | |
651 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure); |
652 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto); |
653 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure); |
654 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee); |
655 | |
656 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id)); |
657 | } |
658 | |
659 | void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) |
660 | { |
661 | RepatchBuffer repatchBuffer(codeBlock); |
662 | |
663 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. |
664 | // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now. |
665 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic)); |
666 | |
667 | int offset = sizeof(JSValue) * cachedOffset; |
668 | |
    // If we're patching to use inline storage, convert the initial load to a LEA; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
671 | if (structure->isUsingInlineStorage()) |
672 | repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad)); |
673 | |
    // Patch the offset into the property map to load from, then patch the Structure to look for.
675 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure); |
676 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload |
677 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag |
678 | } |
679 | |
680 | void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) |
681 | { |
682 | StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress); |
683 | |
684 | // regT0 holds a JSCell* |
685 | |
686 | // Check for array |
687 | Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); |
688 | |
689 | // Checks out okay! - get the length from the storage |
690 | loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2); |
691 | load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2); |
692 | |
693 | Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX)); |
694 | move(regT2, regT0); |
695 | move(Imm32(JSValue::Int32Tag), regT1); |
696 | Jump success = jump(); |
697 | |
698 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
699 | |
700 | // Use the patch information to link the failure cases back to the original slow case routine. |
701 | CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); |
702 | patchBuffer.link(failureCases1, slowCaseBegin); |
703 | patchBuffer.link(failureCases2, slowCaseBegin); |
704 | |
    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
706 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
707 | |
708 | // Track the stub we have created so that it will be deleted later. |
709 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
710 | stubInfo->stubRoutine = entryLabel; |
711 | |
712 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
713 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
714 | RepatchBuffer repatchBuffer(m_codeBlock); |
715 | repatchBuffer.relink(jumpLocation, entryLabel); |
716 | |
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
718 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail)); |
719 | } |
720 | |
721 | void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) |
722 | { |
723 | // regT0 holds a JSCell* |
724 | |
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
727 | JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); |
728 | |
729 | Jump failureCases1 = checkStructure(regT0, structure); |
730 | |
    // Check that the prototype object's Structure has not changed.
732 | Structure** prototypeStructureAddress = &(protoObject->m_structure); |
733 | #if CPU(X86_64) |
734 | move(ImmPtr(prototypeStructure), regT3); |
735 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); |
736 | #else |
737 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); |
738 | #endif |
739 | |
740 | // Checks out okay! - getDirectOffset |
741 | compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); |
742 | |
743 | Jump success = jump(); |
744 | |
745 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
746 | |
747 | // Use the patch information to link the failure cases back to the original slow case routine. |
748 | CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); |
749 | patchBuffer.link(failureCases1, slowCaseBegin); |
750 | patchBuffer.link(failureCases2, slowCaseBegin); |
751 | |
    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
753 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
754 | |
755 | // Track the stub we have created so that it will be deleted later. |
756 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
757 | stubInfo->stubRoutine = entryLabel; |
758 | |
759 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
760 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
761 | RepatchBuffer repatchBuffer(m_codeBlock); |
762 | repatchBuffer.relink(jumpLocation, entryLabel); |
763 | |
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
765 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); |
766 | } |
767 | |
768 | |
769 | void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset) |
770 | { |
771 | // regT0 holds a JSCell* |
772 | |
773 | Jump failureCase = checkStructure(regT0, structure); |
774 | compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset); |
775 | Jump success = jump(); |
776 | |
777 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
778 | |
779 | // Use the patch information to link the failure cases back to the original slow case routine. |
780 | CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine; |
781 | if (!lastProtoBegin) |
782 | lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); |
783 | |
784 | patchBuffer.link(failureCase, lastProtoBegin); |
785 | |
    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
787 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
788 | |
789 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
790 | |
791 | structure->ref(); |
792 | polymorphicStructures->list[currentIndex].set(entryLabel, structure); |
793 | |
794 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
795 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
796 | RepatchBuffer repatchBuffer(m_codeBlock); |
797 | repatchBuffer.relink(jumpLocation, entryLabel); |
798 | } |
799 | |
800 | void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame) |
801 | { |
802 | // regT0 holds a JSCell* |
803 | |
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
806 | JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); |
807 | |
    // Check that regT0 holds an object with the right Structure.
809 | Jump failureCases1 = checkStructure(regT0, structure); |
810 | |
    // Check that the prototype object's Structure has not changed.
812 | Structure** prototypeStructureAddress = &(protoObject->m_structure); |
813 | #if CPU(X86_64) |
814 | move(ImmPtr(prototypeStructure), regT3); |
815 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); |
816 | #else |
817 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); |
818 | #endif |
819 | |
820 | compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); |
821 | |
822 | Jump success = jump(); |
823 | |
824 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
825 | |
826 | // Use the patch information to link the failure cases back to the original slow case routine. |
827 | CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; |
828 | patchBuffer.link(failureCases1, lastProtoBegin); |
829 | patchBuffer.link(failureCases2, lastProtoBegin); |
830 | |
    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
832 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
833 | |
834 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
835 | |
836 | structure->ref(); |
837 | prototypeStructure->ref(); |
838 | prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure); |
839 | |
840 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
841 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
842 | RepatchBuffer repatchBuffer(m_codeBlock); |
843 | repatchBuffer.relink(jumpLocation, entryLabel); |
844 | } |
845 | |
846 | void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame) |
847 | { |
848 | // regT0 holds a JSCell* |
849 | |
850 | ASSERT(count); |
851 | |
852 | JumpList bucketsOfFail; |
853 | |
    // Check that regT0 holds an object with the right Structure.
855 | bucketsOfFail.append(checkStructure(regT0, structure)); |
856 | |
857 | Structure* currStructure = structure; |
858 | RefPtr<Structure>* chainEntries = chain->head(); |
859 | JSObject* protoObject = 0; |
860 | for (unsigned i = 0; i < count; ++i) { |
861 | protoObject = asObject(currStructure->prototypeForLookup(callFrame)); |
862 | currStructure = chainEntries[i].get(); |
863 | |
        // Check that the prototype object's Structure has not changed.
865 | Structure** prototypeStructureAddress = &(protoObject->m_structure); |
866 | #if CPU(X86_64) |
867 | move(ImmPtr(currStructure), regT3); |
868 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); |
869 | #else |
870 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); |
871 | #endif |
872 | } |
873 | ASSERT(protoObject); |
874 | |
875 | compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); |
876 | Jump success = jump(); |
877 | |
878 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
879 | |
880 | // Use the patch information to link the failure cases back to the original slow case routine. |
881 | CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; |
882 | |
883 | patchBuffer.link(bucketsOfFail, lastProtoBegin); |
884 | |
    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
886 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
887 | |
888 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
889 | |
890 | // Track the stub we have created so that it will be deleted later. |
891 | structure->ref(); |
892 | chain->ref(); |
893 | prototypeStructures->list[currentIndex].set(entryLabel, structure, chain); |
894 | |
895 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
896 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
897 | RepatchBuffer repatchBuffer(m_codeBlock); |
898 | repatchBuffer.relink(jumpLocation, entryLabel); |
899 | } |
900 | |
901 | void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) |
902 | { |
903 | // regT0 holds a JSCell* |
904 | |
905 | ASSERT(count); |
906 | |
907 | JumpList bucketsOfFail; |
908 | |
    // Check that regT0 holds an object with the right Structure.
910 | bucketsOfFail.append(checkStructure(regT0, structure)); |
911 | |
912 | Structure* currStructure = structure; |
913 | RefPtr<Structure>* chainEntries = chain->head(); |
914 | JSObject* protoObject = 0; |
915 | for (unsigned i = 0; i < count; ++i) { |
916 | protoObject = asObject(currStructure->prototypeForLookup(callFrame)); |
917 | currStructure = chainEntries[i].get(); |
918 | |
        // Check that the prototype object's Structure has not changed.
920 | Structure** prototypeStructureAddress = &(protoObject->m_structure); |
921 | #if CPU(X86_64) |
922 | move(ImmPtr(currStructure), regT3); |
923 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); |
924 | #else |
925 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); |
926 | #endif |
927 | } |
928 | ASSERT(protoObject); |
929 | |
930 | compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); |
931 | Jump success = jump(); |
932 | |
933 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
934 | |
935 | // Use the patch information to link the failure cases back to the original slow case routine. |
936 | patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); |
937 | |
    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
939 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
940 | |
941 | // Track the stub we have created so that it will be deleted later. |
942 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
943 | stubInfo->stubRoutine = entryLabel; |
944 | |
945 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
946 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
947 | RepatchBuffer repatchBuffer(m_codeBlock); |
948 | repatchBuffer.relink(jumpLocation, entryLabel); |
949 | |
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
951 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); |
952 | } |
953 | |
954 | /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
955 | |
956 | #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
957 | |
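// Compile a load from an object's property storage where the property offset is only
// known at runtime ('offset' holds an index into the storage). Inline versus external
// storage is chosen dynamically, by comparing the structure's property storage
// capacity against the fixed inline capacity.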
958 | void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset) |
959 | { |
960 | ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t)); |
961 | ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t)); |
962 | ASSERT(sizeof(JSValue) == 8); |
963 | |
964 | Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity)); |
965 | loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); |
966 | loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); |
967 | Jump finishedLoad = jump(); |
968 | notUsingInlineStorage.link(this); |
969 | loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); |
970 | loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); |
971 | loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); |
972 | finishedLoad.link(this); |
973 | } |
974 | |
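// get_by_pname is the fast path for for-in enumeration: 'property' is expected to be
// the name produced by the cached JSPropertyNameIterator in 'iter', with 'i' tracking
// the loop position (stored off by one, hence the sub32 below). If the base's Structure
// still matches the iterator's cached Structure and the slot index is cacheable, the
// property is read straight out of the property storage with no hash lookup.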
975 | void JIT::emit_op_get_by_pname(Instruction* currentInstruction) |
976 | { |
977 | unsigned dst = currentInstruction[1].u.operand; |
978 | unsigned base = currentInstruction[2].u.operand; |
979 | unsigned property = currentInstruction[3].u.operand; |
980 | unsigned expected = currentInstruction[4].u.operand; |
981 | unsigned iter = currentInstruction[5].u.operand; |
982 | unsigned i = currentInstruction[6].u.operand; |
983 | |
984 | emitLoad2(property, regT1, regT0, base, regT3, regT2); |
985 | emitJumpSlowCaseIfNotJSCell(property, regT1); |
986 | addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected))); |
987 | // Property registers are now available as the property is known |
988 | emitJumpSlowCaseIfNotJSCell(base, regT3); |
989 | emitLoadPayload(iter, regT1); |
990 | |
991 | // Test base's structure |
992 | loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0); |
993 | addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))); |
994 | load32(addressFor(i), regT3); |
995 | sub32(Imm32(1), regT3); |
996 | addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots)))); |
997 | compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3); |
998 | |
999 | emitStore(dst, regT1, regT0); |
1000 | map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0); |
1001 | } |
1002 | |
1003 | void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1004 | { |
1005 | unsigned dst = currentInstruction[1].u.operand; |
1006 | unsigned base = currentInstruction[2].u.operand; |
1007 | unsigned property = currentInstruction[3].u.operand; |
1008 | |
1009 | linkSlowCaseIfNotJSCell(iter, property); |
1010 | linkSlowCase(iter); |
1011 | linkSlowCaseIfNotJSCell(iter, base); |
1012 | linkSlowCase(iter); |
1013 | linkSlowCase(iter); |
1014 | |
1015 | JITStubCall stubCall(this, cti_op_get_by_val); |
1016 | stubCall.addArgument(base); |
1017 | stubCall.addArgument(property); |
1018 | stubCall.call(dst); |
1019 | } |
1020 | |
1021 | #else // USE(JSVALUE32_64) |
1022 | |
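// In the remaining value representations a JSValue fits in a single pointer-sized
// register, so values travel in one register rather than a tag/payload pair.
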
1023 | void JIT::emit_op_get_by_val(Instruction* currentInstruction) |
1024 | { |
1025 | unsigned dst = currentInstruction[1].u.operand; |
1026 | unsigned base = currentInstruction[2].u.operand; |
1027 | unsigned property = currentInstruction[3].u.operand; |
1028 | |
    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
1031 | #if USE(JSVALUE64) |
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // the number was negative, since m_vectorLength is always less than INT_MAX (since the total allocation
    // size is always less than 4GB). As such, zero-extending will have been correct (and extending the value
    // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
    // sign-extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
1039 | #else |
1040 | emitFastArithImmToInt(regT1); |
1041 | #endif |
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
1052 | } |
1053 | |
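// Like the JSVALUE32_64 variant above, but each storage slot is a single
// pointer-sized JSValue, so one load suffices.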
1054 | void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch) |
1055 | { |
1056 | ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t)); |
1057 | ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t)); |
1058 | |
    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
    Jump finishedLoad = jump();
    notUsingInlineStorage.link(this);
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
    finishedLoad.link(this);
1066 | } |
1067 | |
1068 | void JIT::emit_op_get_by_pname(Instruction* currentInstruction) |
1069 | { |
1070 | unsigned dst = currentInstruction[1].u.operand; |
1071 | unsigned base = currentInstruction[2].u.operand; |
1072 | unsigned property = currentInstruction[3].u.operand; |
1073 | unsigned expected = currentInstruction[4].u.operand; |
1074 | unsigned iter = currentInstruction[5].u.operand; |
1075 | unsigned i = currentInstruction[6].u.operand; |
1076 | |
    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(Imm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
1091 | } |
1092 | |
1093 | void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1094 | { |
1095 | unsigned dst = currentInstruction[1].u.operand; |
1096 | unsigned base = currentInstruction[2].u.operand; |
1097 | unsigned property = currentInstruction[3].u.operand; |
1098 | |
1099 | linkSlowCase(iter); |
    linkSlowCaseIfNotJSCell(iter, base);
1101 | linkSlowCase(iter); |
1102 | linkSlowCase(iter); |
1103 | |
1104 | JITStubCall stubCall(this, cti_op_get_by_val); |
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
1107 | stubCall.call(dst); |
1108 | } |
1109 | |
1110 | void JIT::emit_op_put_by_val(Instruction* currentInstruction) |
1111 | { |
1112 | unsigned base = currentInstruction[1].u.operand; |
1113 | unsigned property = currentInstruction[2].u.operand; |
1114 | unsigned value = currentInstruction[3].u.operand; |
1115 | |
    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
1118 | #if USE(JSVALUE64) |
1119 | // See comment in op_get_by_val. |
    zeroExtend32ToPtr(regT1, regT1);
1121 | #else |
1122 | emitFastArithImmToInt(regT1); |
1123 | #endif |
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);

    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
1135 | Jump end = jump(); |
1136 | |
1137 | empty.link(masm: this); |
1138 | add32(imm: Imm32(1), address: Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); |
1139 | branch32(cond: Below, left: regT1, right: Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(label: storeResult, masm: this); |
1140 | |
1141 | move(src: regT1, dest: regT0); |
1142 | add32(imm: Imm32(1), dest: regT0); |
1143 | store32(src: regT0, address: Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))); |
1144 | jump().linkTo(label: storeResult, masm: this); |
1145 | |
1146 | end.link(masm: this); |
1147 | } |
1148 | |
1149 | void JIT::emit_op_put_by_index(Instruction* currentInstruction) |
1150 | { |
1151 | JITStubCall stubCall(this, cti_op_put_by_index); |
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1155 | stubCall.call(); |
1156 | } |
1157 | |
1158 | void JIT::emit_op_put_getter(Instruction* currentInstruction) |
1159 | { |
1160 | JITStubCall stubCall(this, cti_op_put_getter); |
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1164 | stubCall.call(); |
1165 | } |
1166 | |
1167 | void JIT::emit_op_put_setter(Instruction* currentInstruction) |
1168 | { |
1169 | JITStubCall stubCall(this, cti_op_put_setter); |
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1173 | stubCall.call(); |
1174 | } |
1175 | |
1176 | void JIT::emit_op_del_by_id(Instruction* currentInstruction) |
1177 | { |
1178 | JITStubCall stubCall(this, cti_op_del_by_id); |
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
1182 | } |
1183 | |
1184 | |
1185 | #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
1186 | |
1187 | /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
1188 | |
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
1190 | void JIT::emit_op_method_check(Instruction*) {} |
1191 | void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); } |
1192 | #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) |
1193 | #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS" |
1194 | #endif |
1195 | |
1196 | void JIT::emit_op_get_by_id(Instruction* currentInstruction) |
1197 | { |
1198 | unsigned resultVReg = currentInstruction[1].u.operand; |
1199 | unsigned baseVReg = currentInstruction[2].u.operand; |
1200 | Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); |
1201 | |
1202 | emitGetVirtualRegister(baseVReg, regT0); |
1203 | JITStubCall stubCall(this, cti_op_get_by_id_generic); |
1204 | stubCall.addArgument(regT0); |
1205 | stubCall.addArgument(ImmPtr(ident)); |
1206 | stubCall.call(resultVReg); |
1207 | |
1208 | m_propertyAccessInstructionIndex++; |
1209 | } |
1210 | |
1211 | void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&) |
1212 | { |
1213 | ASSERT_NOT_REACHED(); |
1214 | } |
1215 | |
1216 | void JIT::emit_op_put_by_id(Instruction* currentInstruction) |
1217 | { |
1218 | unsigned baseVReg = currentInstruction[1].u.operand; |
1219 | Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); |
1220 | unsigned valueVReg = currentInstruction[3].u.operand; |
1221 | |
1222 | emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1); |
1223 | |
1224 | JITStubCall stubCall(this, cti_op_put_by_id_generic); |
1225 | stubCall.addArgument(regT0); |
1226 | stubCall.addArgument(ImmPtr(ident)); |
1227 | stubCall.addArgument(regT1); |
1228 | stubCall.call(); |
1229 | |
1230 | m_propertyAccessInstructionIndex++; |
1231 | } |
1232 | |
1233 | void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&) |
1234 | { |
1235 | ASSERT_NOT_REACHED(); |
1236 | } |
1237 | |
1238 | #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
1239 | |
1240 | /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
1241 | |
1242 | #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) |
1243 | |
1244 | void JIT::emit_op_method_check(Instruction* currentInstruction) |
1245 | { |
1246 | // Assert that the following instruction is a get_by_id. |
1247 | ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id); |
1248 | |
1249 | currentInstruction += OPCODE_LENGTH(op_method_check); |
1250 | unsigned resultVReg = currentInstruction[1].u.operand; |
1251 | unsigned baseVReg = currentInstruction[2].u.operand; |
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
1255 | |
1256 | // Do the method check - check the object & its prototype's structure inline (this is the common case). |
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
1258 | MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); |
1259 | |
    Jump notCell = emitJumpIfNotJSCell(regT0);
1261 | |
1262 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); |
1263 | |
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This move will be patched so the cached function is materialized as an immediate - no memory load required.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
1270 | |
1271 | END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); |
1272 | |
1273 | Jump match = jump(); |
1274 | |
1275 | ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj); |
1276 | ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct); |
1277 | ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction); |
1278 | |
1279 | // Link the failure cases here. |
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);
1290 | |
1291 | // We've already generated the following get_by_id, so make sure it's skipped over. |
1292 | m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); |
1293 | } |
1294 | |
1295 | void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1296 | { |
1297 | currentInstruction += OPCODE_LENGTH(op_method_check); |
1298 | unsigned resultVReg = currentInstruction[1].u.operand; |
1299 | unsigned baseVReg = currentInstruction[2].u.operand; |
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
1303 | |
1304 | // We've already generated the following get_by_id, so make sure it's skipped over. |
1305 | m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); |
1306 | } |
1307 | |
#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1309 | |
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
1311 | void JIT::emit_op_method_check(Instruction*) {} |
1312 | void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); } |
1313 | |
1314 | #endif |
1315 | |
1316 | void JIT::emit_op_get_by_id(Instruction* currentInstruction) |
1317 | { |
1318 | unsigned resultVReg = currentInstruction[1].u.operand; |
1319 | unsigned baseVReg = currentInstruction[2].u.operand; |
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
1325 | } |
1326 | |
1327 | void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex) |
1328 | { |
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and we also record the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.
1333 | |
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
1335 | |
1336 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); |
1337 | |
1338 | Label hotPathBegin(this); |
1339 | m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; |
1340 | |
1341 | DataLabelPtr structureToCompare; |
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1348 | Label externalLoadComplete(this); |
1349 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad); |
1350 | ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad); |
1351 | |
    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
1353 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset); |
1354 | |
1355 | Label putResult(this); |
1356 | |
1357 | END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); |
1358 | |
1359 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult); |
1360 | } |
1361 | |
1362 | void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1363 | { |
1364 | unsigned resultVReg = currentInstruction[1].u.operand; |
1365 | unsigned baseVReg = currentInstruction[2].u.operand; |
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
1369 | } |
1370 | |
1371 | void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck) |
1372 | { |
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
1378 | |
    linkSlowCaseIfNotJSCell(iter, baseVReg);
1380 | linkSlowCase(iter); |
1381 | |
1382 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); |
1383 | |
1384 | #ifndef NDEBUG |
1385 | Label coldPathBegin(this); |
1386 | #endif |
1387 | JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id); |
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);
1391 | |
1392 | END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); |
1393 | |
1394 | ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall); |
1395 | |
1396 | // Track the location of the call; this will be used to recover patch information. |
1397 | m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call; |
1398 | m_propertyAccessInstructionIndex++; |
1399 | } |
1400 | |
1401 | void JIT::emit_op_put_by_id(Instruction* currentInstruction) |
1402 | { |
1403 | unsigned baseVReg = currentInstruction[1].u.operand; |
1404 | unsigned valueVReg = currentInstruction[3].u.operand; |
1405 | |
1406 | unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++; |
1407 | |
    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and
    // we generate code such that the Structure & offset are always at the same distance from it.
1411 | |
    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
1416 | |
1417 | BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById); |
1418 | |
1419 | Label hotPathBegin(this); |
1420 | m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; |
1421 | |
    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
1423 | DataLabelPtr structureToCompare; |
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
1425 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure); |
1426 | |
    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1429 | Label externalLoadComplete(this); |
1430 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad); |
1431 | ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad); |
1432 | |
    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
1434 | |
1435 | END_UNINTERRUPTED_SEQUENCE(sequencePutById); |
1436 | |
1437 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset); |
1438 | } |
1439 | |
1440 | void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1441 | { |
1442 | unsigned baseVReg = currentInstruction[1].u.operand; |
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
1444 | |
1445 | unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++; |
1446 | |
    linkSlowCaseIfNotJSCell(iter, baseVReg);
1448 | linkSlowCase(iter); |
1449 | |
1450 | JITStubCall stubCall(this, cti_op_put_by_id); |
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
1454 | Call call = stubCall.call(); |
1455 | |
1456 | // Track the location of the call; this will be used to recover patch information. |
1457 | m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call; |
1458 | } |
1459 | |
// Compile a store into an object's property storage. May overwrite the
// value in the base register.
1462 | void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset) |
1463 | { |
1464 | int offset = cachedOffset * sizeof(JSValue); |
1465 | if (structure->isUsingInlineStorage()) |
1466 | offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage); |
1467 | else |
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
1470 | } |
1471 | |
1472 | // Compile a load from an object's property storage. May overwrite base. |
1473 | void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset) |
1474 | { |
1475 | int offset = cachedOffset * sizeof(JSValue); |
1476 | if (structure->isUsingInlineStorage()) |
1477 | offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage); |
1478 | else |
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
1481 | } |
1482 | |
1483 | void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset) |
1484 | { |
1485 | if (base->isUsingInlineStorage()) |
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
1491 | } |
1492 | } |
1493 | |
1494 | void JIT::testPrototype(Structure* structure, JumpList& failureCases) |
1495 | { |
1496 | if (structure->m_prototype.isNull()) |
1497 | return; |
1498 | |
    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
1502 | } |
1503 | |
1504 | void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress) |
1505 | { |
1506 | JumpList failureCases; |
    // Check that regT0 holds an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    testPrototype(oldStructure, failureCases);
1511 | |
    // Check that the rest of the prototype chain has not been mutated.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it)
        testPrototype(it->get(), failureCases);
1515 | |
1516 | Call callTarget; |
1517 | |
    // Emit a call only if storage reallocation is needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline is called like a JIT stub; before we can make a call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }
1535 | } |
1536 | |
    // The refcount decrement is safe: the CodeBlock guarantees oldStructure->m_refCount > 0
    // for as long as this stub is live.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // Write the value.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
1545 | |
1546 | ret(); |
1547 | |
1548 | ASSERT(!failureCases.empty()); |
    failureCases.link(this);
1550 | restoreArgumentReferenceForTrampoline(); |
1551 | Call failureCall = tailRecursiveCall(); |
1552 | |
1553 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1554 | |
    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
1556 | |
1557 | if (willNeedStorageRealloc) { |
1558 | ASSERT(m_calls.size() == 1); |
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
1560 | } |
1561 | |
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
1566 | } |
1567 | |
1568 | void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) |
1569 | { |
1570 | RepatchBuffer repatchBuffer(codeBlock); |
1571 | |
1572 | // We don't want to patch more than once - in future go to cti_op_get_by_id_generic. |
1573 | // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now. |
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
1575 | |
1576 | int offset = sizeof(JSValue) * cachedOffset; |
1577 | |
    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
1586 | } |
1587 | |
1588 | void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) |
1589 | { |
1590 | RepatchBuffer repatchBuffer(codeBlock); |
1591 | |
1592 | ASSERT(!methodCallLinkInfo.cachedStructure); |
1593 | methodCallLinkInfo.cachedStructure = structure; |
1594 | structure->ref(); |
1595 | |
1596 | Structure* prototypeStructure = proto->structure(); |
1597 | ASSERT(!methodCallLinkInfo.cachedPrototypeStructure); |
1598 | methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure; |
1599 | prototypeStructure->ref(); |
1600 | |
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
1607 | } |
1608 | |
1609 | void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) |
1610 | { |
1611 | RepatchBuffer repatchBuffer(codeBlock); |
1612 | |
1613 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. |
1614 | // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now. |
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
1616 | |
1617 | int offset = sizeof(JSValue) * cachedOffset; |
1618 | |
    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent store's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
1627 | } |
1628 | |
1629 | void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) |
1630 | { |
1631 | StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress); |
1632 | |
    // Check that regT0 is an array.
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
1635 | |
1636 | // Checks out okay! - get the length from the storage |
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
1639 | |
    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
1641 | |
    emitFastArithIntToImmNoCheck(regT2, regT0);
1643 | Jump success = jump(); |
1644 | |
1645 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1646 | |
1647 | // Use the patch information to link the failure cases back to the original slow case routine. |
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1654 | |
1655 | // Track the stub we have created so that it will be deleted later. |
1656 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1657 | stubInfo->stubRoutine = entryLabel; |
1658 | |
1659 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
1666 | } |
1667 | |
1668 | void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) |
1669 | { |
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
1673 | |
    // Check that regT0 holds an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);
1676 | |
    // Check that the prototype object's Structure has not changed.
1678 | Structure** prototypeStructureAddress = &(protoObject->m_structure); |
1679 | #if CPU(X86_64) |
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
1682 | #else |
1683 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); |
1684 | #endif |
1685 | |
1686 | // Checks out okay! - getDirectOffset |
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1688 | |
1689 | Jump success = jump(); |
1690 | |
1691 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1692 | |
1693 | // Use the patch information to link the failure cases back to the original slow case routine. |
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1700 | |
1701 | // Track the stub we have created so that it will be deleted later. |
1702 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1703 | stubInfo->stubRoutine = entryLabel; |
1704 | |
1705 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // On subsequent misses, build a polymorphic list via cti_op_get_by_id_proto_list.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1712 | } |
1713 | |
1714 | void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset) |
1715 | { |
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
1718 | Jump success = jump(); |
1719 | |
1720 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1721 | |
1722 | // Use the patch information to link the failure cases back to the original slow case routine. |
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1731 | |
1732 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1733 | |
1734 | structure->ref(); |
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);
1736 | |
1737 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
1741 | } |
1742 | |
1743 | void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame) |
1744 | { |
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
1748 | |
    // Check that regT0 holds an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);
1751 | |
    // Check that the prototype object's Structure has not changed.
1753 | Structure** prototypeStructureAddress = &(protoObject->m_structure); |
1754 | #if CPU(X86_64) |
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
1757 | #else |
1758 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); |
1759 | #endif |
1760 | |
1761 | // Checks out okay! - getDirectOffset |
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1763 | |
1764 | Jump success = jump(); |
1765 | |
1766 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1767 | |
1768 | // Use the patch information to link the failure cases back to the original slow case routine. |
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1775 | |
1776 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1777 | |
1778 | structure->ref(); |
1779 | prototypeStructure->ref(); |
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
1781 | |
1782 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
1786 | } |
1787 | |
1788 | void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame) |
1789 | { |
1790 | ASSERT(count); |
1791 | |
1792 | JumpList bucketsOfFail; |
1793 | |
    // Check that regT0 holds an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);
1797 | |
1798 | Structure* currStructure = structure; |
1799 | RefPtr<Structure>* chainEntries = chain->head(); |
1800 | JSObject* protoObject = 0; |
1801 | for (unsigned i = 0; i < count; ++i) { |
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1803 | currStructure = chainEntries[i].get(); |
1804 | |
        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
1810 | #else |
1811 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); |
1812 | #endif |
1813 | } |
1814 | ASSERT(protoObject); |
1815 | |
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1817 | Jump success = jump(); |
1818 | |
1819 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1820 | |
1821 | // Use the patch information to link the failure cases back to the original slow case routine. |
1822 | CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; |
1823 | |
    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1828 | |
1829 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1830 | |
1831 | // Track the stub we have created so that it will be deleted later. |
1832 | structure->ref(); |
1833 | chain->ref(); |
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
1835 | |
1836 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
1840 | } |
1841 | |
1842 | void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) |
1843 | { |
1844 | ASSERT(count); |
1845 | |
1846 | JumpList bucketsOfFail; |
1847 | |
    // Check that regT0 holds an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));
1850 | |
1851 | Structure* currStructure = structure; |
1852 | RefPtr<Structure>* chainEntries = chain->head(); |
1853 | JSObject* protoObject = 0; |
1854 | for (unsigned i = 0; i < count; ++i) { |
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1856 | currStructure = chainEntries[i].get(); |
1857 | |
        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
1863 | #else |
1864 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); |
1865 | #endif |
1866 | } |
1867 | ASSERT(protoObject); |
1868 | |
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1870 | Jump success = jump(); |
1871 | |
1872 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
1873 | |
1874 | // Use the patch information to link the failure cases back to the original slow case routine. |
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1879 | |
1880 | // Track the stub we have created so that it will be deleted later. |
1881 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1882 | stubInfo->stubRoutine = entryLabel; |
1883 | |
1884 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // On subsequent misses, build a polymorphic list via cti_op_get_by_id_proto_list.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1891 | } |
1892 | |
1893 | /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
1894 | |
1895 | #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
1896 | |
1897 | #endif // USE(JSVALUE32_64) |
1898 | |
1899 | } // namespace JSC |
1900 | |
1901 | #endif // ENABLE(JIT) |
1902 | |