1 | /* |
2 | * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * |
9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of |
15 | * its contributors may be used to endorse or promote products derived |
16 | * from this software without specific prior written permission. |
17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY |
19 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
20 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
21 | * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
22 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
23 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
24 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
25 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | */ |
29 | |
30 | #ifndef BytecodeGenerator_h |
31 | #define BytecodeGenerator_h |
32 | |
33 | #include "CodeBlock.h" |
34 | #include "HashTraits.h" |
35 | #include "Instruction.h" |
36 | #include "Label.h" |
37 | #include "LabelScope.h" |
38 | #include "Interpreter.h" |
39 | #include "RegisterID.h" |
40 | #include "SymbolTable.h" |
41 | #include "Debugger.h" |
42 | #include "Nodes.h" |
43 | #include <wtf/FastAllocBase.h> |
44 | #include <wtf/PassRefPtr.h> |
45 | #include <wtf/SegmentedVector.h> |
46 | #include <wtf/Vector.h> |
47 | |
48 | namespace JSC { |
49 | |
50 | class Identifier; |
51 | class ScopeChain; |
52 | class ScopeNode; |
53 | |
// Bookkeeping for one enclosing "finally" block while generating bytecode.
// Aggregate-initialized; member order is significant — do not reorder.
struct FinallyContext {
    Label* finallyAddr;      // entry label of the finally block's bytecode
    RegisterID* retAddrDst;  // register receiving the subroutine return address
};
58 | |
// One entry of the generator's control-flow (scope) stack: either a dynamic
// scope or a finally block. Aggregate layout — do not reorder members.
struct ControlFlowContext {
    bool isFinallyBlock;            // true: finallyContext is valid; false: plain scope
    FinallyContext finallyContext;  // only meaningful when isFinallyBlock is true
};
63 | |
// Registers tracked for an optimised for..in loop, so that subscript accesses
// matching the loop property can be specialised. Aggregate-initialized in
// pushOptimisedForIn(); member order is significant — do not reorder.
struct ForInContext {
    RefPtr<RegisterID> expectedSubscriptRegister;  // register a get_by_val subscript must match
    RefPtr<RegisterID> iterRegister;               // property-name iterator
    RefPtr<RegisterID> indexRegister;              // current enumeration index
    RefPtr<RegisterID> propertyRegister;           // current property name value
};
70 | |
71 | class BytecodeGenerator : public FastAllocBase { |
72 | public: |
73 | typedef DeclarationStacks::VarStack VarStack; |
74 | typedef DeclarationStacks::FunctionStack FunctionStack; |
75 | |
76 | static void setDumpsGeneratedCode(bool dumpsGeneratedCode); |
77 | static bool dumpsGeneratedCode(); |
78 | |
79 | BytecodeGenerator(ProgramNode*, const Debugger*, const ScopeChain&, SymbolTable*, ProgramCodeBlock*); |
80 | BytecodeGenerator(FunctionBodyNode*, const Debugger*, const ScopeChain&, SymbolTable*, CodeBlock*); |
81 | BytecodeGenerator(EvalNode*, const Debugger*, const ScopeChain&, SymbolTable*, EvalCodeBlock*); |
82 | |
83 | JSGlobalData* globalData() const { return m_globalData; } |
84 | const CommonIdentifiers& propertyNames() const { return *m_globalData->propertyNames; } |
85 | |
86 | void generate(); |
87 | |
88 | // Returns the register corresponding to a local variable, or 0 if no |
89 | // such register exists. Registers returned by registerFor do not |
90 | // require explicit reference counting. |
91 | RegisterID* registerFor(const Identifier&); |
92 | |
93 | bool willResolveToArguments(const Identifier&); |
94 | RegisterID* uncheckedRegisterForArguments(); |
95 | |
96 | // Behaves as registerFor does, but ignores dynamic scope as |
97 | // dynamic scope should not interfere with const initialisation |
98 | RegisterID* constRegisterFor(const Identifier&); |
99 | |
100 | // Searches the scope chain in an attempt to statically locate the requested |
101 | // property. Returns false if for any reason the property cannot be safely |
102 | // optimised at all. Otherwise it will return the index and depth of the |
103 | // VariableObject that defines the property. If the property cannot be found |
104 | // statically, depth will contain the depth of the scope chain where dynamic |
105 | // lookup must begin. |
106 | // |
107 | // NB: depth does _not_ include the local scope. eg. a depth of 0 refers |
108 | // to the scope containing this codeblock. |
109 | bool findScopedProperty(const Identifier&, int& index, size_t& depth, bool forWriting, JSObject*& globalObject); |
110 | |
111 | // Returns the register storing "this" |
112 | RegisterID* thisRegister() { return &m_thisRegister; } |
113 | |
114 | bool isLocal(const Identifier&); |
115 | bool isLocalConstant(const Identifier&); |
116 | |
117 | // Returns the next available temporary register. Registers returned by |
118 | // newTemporary require a modified form of reference counting: any |
119 | // register with a refcount of 0 is considered "available", meaning that |
120 | // the next instruction may overwrite it. |
121 | RegisterID* newTemporary(); |
122 | |
123 | RegisterID* highestUsedRegister(); |
124 | |
125 | // The same as newTemporary(), but this function returns "suggestion" if |
126 | // "suggestion" is a temporary. This function is helpful in situations |
127 | // where you've put "suggestion" in a RefPtr, but you'd like to allow |
128 | // the next instruction to overwrite it anyway. |
129 | RegisterID* newTemporaryOr(RegisterID* suggestion) { return suggestion->isTemporary() ? suggestion : newTemporary(); } |
130 | |
131 | // Functions for handling of dst register |
132 | |
133 | RegisterID* ignoredResult() { return &m_ignoredResultRegister; } |
134 | |
135 | // Returns a place to write intermediate values of an operation |
136 | // which reuses dst if it is safe to do so. |
137 | RegisterID* tempDestination(RegisterID* dst) |
138 | { |
139 | return (dst && dst != ignoredResult() && dst->isTemporary()) ? dst : newTemporary(); |
140 | } |
141 | |
142 | // Returns the place to write the final output of an operation. |
143 | RegisterID* finalDestination(RegisterID* originalDst, RegisterID* tempDst = 0) |
144 | { |
145 | if (originalDst && originalDst != ignoredResult()) |
146 | return originalDst; |
147 | ASSERT(tempDst != ignoredResult()); |
148 | if (tempDst && tempDst->isTemporary()) |
149 | return tempDst; |
150 | return newTemporary(); |
151 | } |
152 | |
153 | RegisterID* destinationForAssignResult(RegisterID* dst) |
154 | { |
155 | if (dst && dst != ignoredResult() && m_codeBlock->needsFullScopeChain()) |
156 | return dst->isTemporary() ? dst : newTemporary(); |
157 | return 0; |
158 | } |
159 | |
160 | // Moves src to dst if dst is not null and is different from src, otherwise just returns src. |
161 | RegisterID* moveToDestinationIfNeeded(RegisterID* dst, RegisterID* src) |
162 | { |
163 | return dst == ignoredResult() ? 0 : (dst && dst != src) ? emitMove(dst, src) : src; |
164 | } |
165 | |
166 | PassRefPtr<LabelScope> newLabelScope(LabelScope::Type, const Identifier* = 0); |
167 | PassRefPtr<Label> newLabel(); |
168 | |
169 | // The emitNode functions are just syntactic sugar for calling |
170 | // Node::emitCode. These functions accept a 0 for the register, |
171 | // meaning that the node should allocate a register, or ignoredResult(), |
172 | // meaning that the node need not put the result in a register. |
173 | // Other emit functions do not accept 0 or ignoredResult(). |
174 | RegisterID* emitNode(RegisterID* dst, Node* n) |
175 | { |
176 | // Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary. |
177 | ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount()); |
178 | if (!m_codeBlock->numberOfLineInfos() || m_codeBlock->lastLineInfo().lineNumber != n->lineNo()) { |
179 | LineInfo info = { .instructionOffset: static_cast<uint32_t>(instructions().size()), .lineNumber: n->lineNo() }; |
180 | m_codeBlock->addLineInfo(lineInfo: info); |
181 | } |
182 | if (m_emitNodeDepth >= s_maxEmitNodeDepth) |
183 | return emitThrowExpressionTooDeepException(); |
184 | ++m_emitNodeDepth; |
185 | RegisterID* r = n->emitBytecode(*this, destination: dst); |
186 | --m_emitNodeDepth; |
187 | return r; |
188 | } |
189 | |
190 | RegisterID* emitNode(Node* n) |
191 | { |
192 | return emitNode(dst: 0, n); |
193 | } |
194 | |
195 | void emitNodeInConditionContext(ExpressionNode* n, Label* trueTarget, Label* falseTarget, bool fallThroughMeansTrue) |
196 | { |
197 | if (!m_codeBlock->numberOfLineInfos() || m_codeBlock->lastLineInfo().lineNumber != n->lineNo()) { |
198 | LineInfo info = { .instructionOffset: static_cast<uint32_t>(instructions().size()), .lineNumber: n->lineNo() }; |
199 | m_codeBlock->addLineInfo(lineInfo: info); |
200 | } |
201 | if (m_emitNodeDepth >= s_maxEmitNodeDepth) |
202 | emitThrowExpressionTooDeepException(); |
203 | ++m_emitNodeDepth; |
204 | n->emitBytecodeInConditionContext(*this, trueTarget, falseTarget, fallThroughMeansTrue); |
205 | --m_emitNodeDepth; |
206 | } |
207 | |
208 | void emitExpressionInfo(unsigned divot, unsigned startOffset, unsigned endOffset) |
209 | { |
210 | divot -= m_codeBlock->sourceOffset(); |
211 | if (divot > ExpressionRangeInfo::MaxDivot) { |
212 | // Overflow has occurred, we can only give line number info for errors for this region |
213 | divot = 0; |
214 | startOffset = 0; |
215 | endOffset = 0; |
216 | } else if (startOffset > ExpressionRangeInfo::MaxOffset) { |
217 | // If the start offset is out of bounds we clear both offsets |
218 | // so we only get the divot marker. Error message will have to be reduced |
219 | // to line and column number. |
220 | startOffset = 0; |
221 | endOffset = 0; |
222 | } else if (endOffset > ExpressionRangeInfo::MaxOffset) { |
223 | // The end offset is only used for additional context, and is much more likely |
224 | // to overflow (eg. function call arguments) so we are willing to drop it without |
225 | // dropping the rest of the range. |
226 | endOffset = 0; |
227 | } |
228 | |
229 | ExpressionRangeInfo info; |
230 | info.instructionOffset = instructions().size(); |
231 | info.divotPoint = divot; |
232 | info.startOffset = startOffset; |
233 | info.endOffset = endOffset; |
234 | m_codeBlock->addExpressionInfo(expressionInfo: info); |
235 | } |
236 | |
237 | void emitGetByIdExceptionInfo(OpcodeID opcodeID) |
238 | { |
239 | // Only op_construct and op_instanceof need exception info for |
240 | // a preceding op_get_by_id. |
241 | ASSERT(opcodeID == op_construct || opcodeID == op_instanceof); |
242 | GetByIdExceptionInfo info; |
243 | info.bytecodeOffset = instructions().size(); |
244 | info.isOpConstruct = (opcodeID == op_construct); |
245 | m_codeBlock->addGetByIdExceptionInfo(info); |
246 | } |
247 | |
248 | ALWAYS_INLINE bool leftHandSideNeedsCopy(bool rightHasAssignments, bool rightIsPure) |
249 | { |
250 | return (m_codeType != FunctionCode || m_codeBlock->needsFullScopeChain() || rightHasAssignments) && !rightIsPure; |
251 | } |
252 | |
253 | ALWAYS_INLINE PassRefPtr<RegisterID> emitNodeForLeftHandSide(ExpressionNode* n, bool rightHasAssignments, bool rightIsPure) |
254 | { |
255 | if (leftHandSideNeedsCopy(rightHasAssignments, rightIsPure)) { |
256 | PassRefPtr<RegisterID> dst = newTemporary(); |
257 | emitNode(dst: dst.get(), n); |
258 | return dst; |
259 | } |
260 | |
261 | return PassRefPtr<RegisterID>(emitNode(n)); |
262 | } |
263 | |
264 | RegisterID* emitLoad(RegisterID* dst, bool); |
265 | RegisterID* emitLoad(RegisterID* dst, double); |
266 | RegisterID* emitLoad(RegisterID* dst, const Identifier&); |
267 | RegisterID* emitLoad(RegisterID* dst, JSValue); |
268 | |
269 | RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src); |
270 | RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes); |
271 | RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2); |
272 | RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src); |
273 | |
274 | RegisterID* emitNewObject(RegisterID* dst); |
275 | RegisterID* emitNewArray(RegisterID* dst, ElementNode*); // stops at first elision |
276 | |
277 | RegisterID* emitNewFunction(RegisterID* dst, FunctionBodyNode* body); |
278 | RegisterID* emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func); |
279 | RegisterID* emitNewRegExp(RegisterID* dst, RegExp* regExp); |
280 | |
281 | RegisterID* emitMove(RegisterID* dst, RegisterID* src); |
282 | |
283 | RegisterID* emitToJSNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_jsnumber, dst, src); } |
284 | RegisterID* emitPreInc(RegisterID* srcDst); |
285 | RegisterID* emitPreDec(RegisterID* srcDst); |
286 | RegisterID* emitPostInc(RegisterID* dst, RegisterID* srcDst); |
287 | RegisterID* emitPostDec(RegisterID* dst, RegisterID* srcDst); |
288 | |
289 | RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype); |
290 | RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); } |
291 | RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, src1: property, src2: base, OperandTypes()); } |
292 | |
293 | RegisterID* emitResolve(RegisterID* dst, const Identifier& property); |
294 | RegisterID* emitGetScopedVar(RegisterID* dst, size_t skip, int index, JSValue globalObject); |
295 | RegisterID* emitPutScopedVar(size_t skip, int index, RegisterID* value, JSValue globalObject); |
296 | |
297 | RegisterID* emitResolveBase(RegisterID* dst, const Identifier& property); |
298 | RegisterID* emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property); |
299 | |
300 | void emitMethodCheck(); |
301 | |
302 | RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property); |
303 | RegisterID* emitPutById(RegisterID* base, const Identifier& property, RegisterID* value); |
304 | RegisterID* emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier&); |
305 | RegisterID* emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property); |
306 | RegisterID* emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value); |
307 | RegisterID* emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property); |
308 | RegisterID* emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value); |
309 | RegisterID* emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value); |
310 | RegisterID* emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value); |
311 | |
312 | RegisterID* emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset); |
313 | RegisterID* emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset); |
314 | RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCount, unsigned divot, unsigned startOffset, unsigned endOffset); |
315 | RegisterID* emitLoadVarargs(RegisterID* argCountDst, RegisterID* args); |
316 | |
317 | RegisterID* emitReturn(RegisterID* src); |
318 | RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); } |
319 | |
320 | RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset); |
321 | RegisterID* emitStrcat(RegisterID* dst, RegisterID* src, int count); |
322 | void emitToPrimitive(RegisterID* dst, RegisterID* src); |
323 | |
324 | PassRefPtr<Label> emitLabel(Label*); |
325 | PassRefPtr<Label> emitJump(Label* target); |
326 | PassRefPtr<Label> emitJumpIfTrue(RegisterID* cond, Label* target); |
327 | PassRefPtr<Label> emitJumpIfFalse(RegisterID* cond, Label* target); |
328 | PassRefPtr<Label> emitJumpIfNotFunctionCall(RegisterID* cond, Label* target); |
329 | PassRefPtr<Label> emitJumpIfNotFunctionApply(RegisterID* cond, Label* target); |
330 | PassRefPtr<Label> emitJumpScopes(Label* target, int targetScopeDepth); |
331 | |
332 | PassRefPtr<Label> emitJumpSubroutine(RegisterID* retAddrDst, Label*); |
333 | void emitSubroutineReturn(RegisterID* retAddrSrc); |
334 | |
335 | RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget); |
336 | RegisterID* emitNextPropertyName(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, RegisterID* iter, Label* target); |
337 | |
338 | RegisterID* emitCatch(RegisterID*, Label* start, Label* end); |
339 | void emitThrow(RegisterID* exc) { emitUnaryNoDstOp(op_throw, src: exc); } |
340 | RegisterID* emitNewError(RegisterID* dst, ErrorType type, JSValue message); |
341 | void emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value); |
342 | |
343 | RegisterID* emitPushScope(RegisterID* scope); |
344 | void emitPopScope(); |
345 | |
346 | void emitDebugHook(DebugHookID, int firstLine, int lastLine); |
347 | |
348 | int scopeDepth() { return m_dynamicScopeDepth + m_finallyDepth; } |
349 | bool hasFinaliser() { return m_finallyDepth != 0; } |
350 | |
351 | void pushFinallyContext(Label* target, RegisterID* returnAddrDst); |
352 | void popFinallyContext(); |
353 | |
354 | void pushOptimisedForIn(RegisterID* expectedBase, RegisterID* iter, RegisterID* index, RegisterID* propertyRegister) |
355 | { |
356 | ForInContext context = { .expectedSubscriptRegister: expectedBase, .iterRegister: iter, .indexRegister: index, .propertyRegister: propertyRegister }; |
357 | m_forInContextStack.append(val: context); |
358 | } |
359 | |
360 | void popOptimisedForIn() |
361 | { |
362 | m_forInContextStack.removeLast(); |
363 | } |
364 | |
365 | LabelScope* breakTarget(const Identifier&); |
366 | LabelScope* continueTarget(const Identifier&); |
367 | |
368 | void beginSwitch(RegisterID*, SwitchInfo::SwitchType); |
369 | void endSwitch(uint32_t clauseCount, RefPtr<Label>*, ExpressionNode**, Label* defaultLabel, int32_t min, int32_t range); |
370 | |
371 | CodeType codeType() const { return m_codeType; } |
372 | |
373 | void setRegeneratingForExceptionInfo(CodeBlock* originalCodeBlock) |
374 | { |
375 | m_regeneratingForExceptionInfo = true; |
376 | m_codeBlockBeingRegeneratedFrom = originalCodeBlock; |
377 | } |
378 | |
379 | private: |
380 | void emitOpcode(OpcodeID); |
381 | void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); |
382 | void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); |
383 | void rewindBinaryOp(); |
384 | void rewindUnaryOp(); |
385 | |
386 | PassRefPtr<Label> emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope); |
387 | |
388 | typedef HashMap<EncodedJSValue, unsigned, EncodedJSValueHash, EncodedJSValueHashTraits> JSValueMap; |
389 | |
390 | struct IdentifierMapIndexHashTraits { |
391 | typedef int TraitType; |
392 | typedef IdentifierMapIndexHashTraits StorageTraits; |
393 | static int emptyValue() { return std::numeric_limits<int>::max(); } |
394 | static const bool emptyValueIsZero = false; |
395 | static const bool needsDestruction = false; |
396 | static const bool needsRef = false; |
397 | }; |
398 | |
399 | typedef HashMap<RefPtr<UString::Rep>, int, IdentifierRepHash, HashTraits<RefPtr<UString::Rep> >, IdentifierMapIndexHashTraits> IdentifierMap; |
400 | typedef HashMap<double, JSValue> NumberMap; |
401 | typedef HashMap<UString::Rep*, JSString*, IdentifierRepHash> IdentifierStringMap; |
402 | |
403 | RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset); |
404 | |
405 | RegisterID* newRegister(); |
406 | |
407 | // Returns the RegisterID corresponding to ident. |
408 | RegisterID* addVar(const Identifier& ident, bool isConstant) |
409 | { |
410 | RegisterID* local; |
411 | addVar(ident, isConstant, local); |
412 | return local; |
413 | } |
414 | // Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used. |
415 | bool addVar(const Identifier&, bool isConstant, RegisterID*&); |
416 | |
417 | // Returns the RegisterID corresponding to ident. |
418 | RegisterID* addGlobalVar(const Identifier& ident, bool isConstant) |
419 | { |
420 | RegisterID* local; |
421 | addGlobalVar(ident, isConstant, local); |
422 | return local; |
423 | } |
424 | // Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used. |
425 | bool addGlobalVar(const Identifier&, bool isConstant, RegisterID*&); |
426 | |
427 | RegisterID* addParameter(const Identifier&); |
428 | |
429 | void preserveLastVar(); |
430 | |
431 | RegisterID& registerFor(int index) |
432 | { |
433 | if (index >= 0) |
434 | return m_calleeRegisters[index]; |
435 | |
436 | if (index == RegisterFile::OptionalCalleeArguments) |
437 | return m_argumentsRegister; |
438 | |
439 | if (m_parameters.size()) { |
440 | ASSERT(!m_globals.size()); |
441 | return m_parameters[index + m_parameters.size() + RegisterFile::CallFrameHeaderSize]; |
442 | } |
443 | |
444 | return m_globals[-index - 1]; |
445 | } |
446 | |
447 | unsigned addConstant(const Identifier&); |
448 | RegisterID* addConstantValue(JSValue); |
449 | unsigned addRegExp(RegExp*); |
450 | |
451 | PassRefPtr<FunctionExecutable> makeFunction(ExecState* exec, FunctionBodyNode* body) |
452 | { |
453 | return FunctionExecutable::create(exec, name: body->ident(), source: body->source(), forceUsesArguments: body->usesArguments(), parameters: body->parameters(), firstLine: body->lineNo(), lastLine: body->lastLine()); |
454 | } |
455 | |
456 | PassRefPtr<FunctionExecutable> makeFunction(JSGlobalData* globalData, FunctionBodyNode* body) |
457 | { |
458 | return FunctionExecutable::create(globalData, name: body->ident(), source: body->source(), forceUsesArguments: body->usesArguments(), parameters: body->parameters(), firstLine: body->lineNo(), lastLine: body->lastLine()); |
459 | } |
460 | |
461 | Vector<Instruction>& instructions() { return m_codeBlock->instructions(); } |
462 | SymbolTable& symbolTable() { return *m_symbolTable; } |
463 | |
464 | bool shouldOptimizeLocals() { return (m_codeType != EvalCode) && !m_dynamicScopeDepth; } |
465 | bool canOptimizeNonLocals() { return (m_codeType == FunctionCode) && !m_dynamicScopeDepth && !m_codeBlock->usesEval(); } |
466 | |
467 | RegisterID* emitThrowExpressionTooDeepException(); |
468 | |
469 | void createArgumentsIfNecessary(); |
470 | |
471 | bool m_shouldEmitDebugHooks; |
472 | bool m_shouldEmitProfileHooks; |
473 | |
474 | const ScopeChain* m_scopeChain; |
475 | SymbolTable* m_symbolTable; |
476 | |
477 | ScopeNode* m_scopeNode; |
478 | CodeBlock* m_codeBlock; |
479 | |
480 | // Some of these objects keep pointers to one another. They are arranged |
481 | // to ensure a sane destruction order that avoids references to freed memory. |
482 | HashSet<RefPtr<UString::Rep>, IdentifierRepHash> m_functions; |
483 | RegisterID m_ignoredResultRegister; |
484 | RegisterID m_thisRegister; |
485 | RegisterID m_argumentsRegister; |
486 | int m_activationRegisterIndex; |
487 | SegmentedVector<RegisterID, 32> m_constantPoolRegisters; |
488 | SegmentedVector<RegisterID, 32> m_calleeRegisters; |
489 | SegmentedVector<RegisterID, 32> m_parameters; |
490 | SegmentedVector<RegisterID, 32> m_globals; |
491 | SegmentedVector<Label, 32> m_labels; |
492 | SegmentedVector<LabelScope, 8> m_labelScopes; |
493 | RefPtr<RegisterID> m_lastVar; |
494 | int m_finallyDepth; |
495 | int m_dynamicScopeDepth; |
496 | int m_baseScopeDepth; |
497 | CodeType m_codeType; |
498 | |
499 | Vector<ControlFlowContext> m_scopeContextStack; |
500 | Vector<SwitchInfo> m_switchContextStack; |
501 | Vector<ForInContext> m_forInContextStack; |
502 | |
503 | int m_nextGlobalIndex; |
504 | int m_nextParameterIndex; |
505 | int m_firstConstantIndex; |
506 | int m_nextConstantOffset; |
507 | unsigned m_globalConstantIndex; |
508 | |
509 | int m_globalVarStorageOffset; |
510 | |
511 | // Constant pool |
512 | IdentifierMap m_identifierMap; |
513 | JSValueMap m_jsValueMap; |
514 | NumberMap m_numberMap; |
515 | IdentifierStringMap m_stringMap; |
516 | |
517 | JSGlobalData* m_globalData; |
518 | |
519 | OpcodeID m_lastOpcodeID; |
520 | |
521 | unsigned m_emitNodeDepth; |
522 | |
523 | bool m_regeneratingForExceptionInfo; |
524 | CodeBlock* m_codeBlockBeingRegeneratedFrom; |
525 | |
526 | static const unsigned s_maxEmitNodeDepth = 5000; |
527 | }; |
528 | |
529 | } |
530 | |
531 | #endif // BytecodeGenerator_h |
532 | |