1 /*
2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include "config.h"
31 #include "BytecodeGenerator.h"
32
33 #include "BatchedTransitionOptimizer.h"
34 #include "PrototypeFunction.h"
35 #include "JSFunction.h"
36 #include "Interpreter.h"
37 #include "UString.h"
38
39 using namespace std;
40
41 namespace JSC {
42
43 /*
44 The layout of a register frame looks like this:
45
46 For
47
48 function f(x, y) {
49 var v1;
50 function g() { }
51 var v2;
52 return (x) * (y);
53 }
54
55 assuming (x) and (y) generated temporaries t1 and t2, you would have
56
57 ------------------------------------
58 | x | y | g | v2 | v1 | t1 | t2 | <-- value held
59 ------------------------------------
60 | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index
61 ------------------------------------
62 | params->|<-locals | temps->
63
64 Because temporary registers are allocated in a stack-like fashion, we
65 can reclaim them with a simple popping algorithm. The same goes for labels.
66 (We never reclaim parameter or local registers, because parameters and
67 locals are DontDelete.)
68
69 The register layout before a function call looks like this:
70
71 For
72
73 function f(x, y)
74 {
75 }
76
77 f(1);
78
79 > <------------------------------
80 < > reserved: call frame | 1 | <-- value held
81 > >snip< <------------------------------
82 < > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index
83 > <------------------------------
84 | params->|<-locals | temps->
85
86 The call instruction fills in the "call frame" registers. It also pads
87 missing arguments at the end of the call:
88
89 > <-----------------------------------
90 < > reserved: call frame | 1 | ? | <-- value held ("?" stands for "undefined")
91 > >snip< <-----------------------------------
92 < > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index
93 > <-----------------------------------
94 | params->|<-locals | temps->
95
96 After filling in missing arguments, the call instruction sets up the new
97 stack frame to overlap the end of the old stack frame:
98
99 |----------------------------------> <
100 | reserved: call frame | 1 | ? < > <-- value held ("?" stands for "undefined")
101 |----------------------------------> >snip< <
102 | -7 | -6 | -5 | -4 | -3 | -2 | -1 < > <-- register index
103 |----------------------------------> <
104 | | params->|<-locals | temps->
105
106 That way, arguments are "copied" into the callee's stack frame for free.
107
108 If the caller supplies too many arguments, this trick doesn't work. The
109 extra arguments protrude into space reserved for locals and temporaries.
110 In that case, the call instruction makes a real copy of the call frame header,
111 along with just the arguments expected by the callee, leaving the original
112 call frame header and arguments behind. (The call instruction can't just discard
113 extra arguments, because the "arguments" object may access them later.)
114 This copying strategy ensures that all named values will be at the indices
115 expected by the callee.
116 */
117
#ifndef NDEBUG
// Debug-only switch: when true, generate() dumps each CodeBlock after bytecode emission.
static bool s_dumpsGeneratedCode = false;
#endif
121
setDumpsGeneratedCode(bool dumpsGeneratedCode)122 void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
123 {
124 #ifndef NDEBUG
125 s_dumpsGeneratedCode = dumpsGeneratedCode;
126 #else
127 UNUSED_PARAM(dumpsGeneratedCode);
128 #endif
129 }
130
dumpsGeneratedCode()131 bool BytecodeGenerator::dumpsGeneratedCode()
132 {
133 #ifndef NDEBUG
134 return s_dumpsGeneratedCode;
135 #else
136 return false;
137 #endif
138 }
139
// Drives bytecode emission for the whole scope node into m_codeBlock,
// then performs post-generation cleanup: dropping the symbol table when
// it is no longer needed, tagging the numeric-compare special case,
// clearing regenerable exception info, and shrinking the CodeBlock.
void BytecodeGenerator::generate()
{
    m_codeBlock->setThisRegister(m_thisRegister.index());

    // Walk the AST; each node appends its instructions to the CodeBlock.
    m_scopeNode->emitBytecode(*this);

#ifndef NDEBUG
    m_codeBlock->setInstructionCount(m_codeBlock->instructions().size());

    if (s_dumpsGeneratedCode)
        m_codeBlock->dump(m_scopeChain->globalObject()->globalExec());
#endif

    // The symbol table is only needed after generation when identifiers may
    // be resolved dynamically (full scope chain / arguments); otherwise
    // clear it to save memory.
    if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
        symbolTable().clear();

    // Recognize the shared numeric sort-comparator bytecode by comparing
    // instruction streams.
    m_codeBlock->setIsNumericCompareFunction(instructions() == m_globalData->numericCompareFunction(m_scopeChain->globalObject()->globalExec()));

#if !ENABLE(OPCODE_SAMPLING)
    // Exception info can be regenerated on demand, so drop it now unless we
    // are in the middle of such a regeneration.
    if (!m_regeneratingForExceptionInfo && (m_codeType == FunctionCode || m_codeType == EvalCode))
        m_codeBlock->clearExceptionInfo();
#endif

    m_codeBlock->shrinkToFit();
}
165
addVar(const Identifier & ident,bool isConstant,RegisterID * & r0)166 bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
167 {
168 int index = m_calleeRegisters.size();
169 SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
170 pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
171
172 if (!result.second) {
173 r0 = ®isterFor(result.first->second.getIndex());
174 return false;
175 }
176
177 ++m_codeBlock->m_numVars;
178 r0 = newRegister();
179 return true;
180 }
181
addGlobalVar(const Identifier & ident,bool isConstant,RegisterID * & r0)182 bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
183 {
184 int index = m_nextGlobalIndex;
185 SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
186 pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
187
188 if (!result.second)
189 index = result.first->second.getIndex();
190 else {
191 --m_nextGlobalIndex;
192 m_globals.append(index + m_globalVarStorageOffset);
193 }
194
195 r0 = ®isterFor(index);
196 return result.second;
197 }
198
preserveLastVar()199 void BytecodeGenerator::preserveLastVar()
200 {
201 if ((m_firstConstantIndex = m_calleeRegisters.size()) != 0)
202 m_lastVar = &m_calleeRegisters.last();
203 }
204
// Constructs a generator for program (global) code. New global variables
// and function declarations are placed directly in the register file's
// global region when they fit under maxGlobals(); otherwise they fall back
// to ordinary properties on the global object.
BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock)
    : m_shouldEmitDebugHooks(!!debugger)
    , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
    , m_scopeChain(&scopeChain)
    , m_symbolTable(symbolTable)
    , m_scopeNode(programNode)
    , m_codeBlock(codeBlock)
    , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
    , m_finallyDepth(0)
    , m_dynamicScopeDepth(0)
    , m_baseScopeDepth(0)
    , m_codeType(GlobalCode)
    , m_nextGlobalIndex(-1)
    , m_nextConstantOffset(0)
    , m_globalConstantIndex(0)
    , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
    , m_lastOpcodeID(op_end)
    , m_emitNodeDepth(0)
    , m_regeneratingForExceptionInfo(false)
    , m_codeBlockBeingRegeneratedFrom(0)
{
    if (m_shouldEmitDebugHooks)
        m_codeBlock->setNeedsFullScopeChain(true);

    emitOpcode(op_enter);
    codeBlock->setGlobalData(m_globalData);

    // FIXME: Move code that modifies the global object to Interpreter::execute.

    m_codeBlock->m_numParameters = 1; // Allocate space for "this"

    JSGlobalObject* globalObject = scopeChain.globalObject();
    ExecState* exec = globalObject->globalExec();
    RegisterFile* registerFile = &exec->globalData().interpreter->registerFile();

    // Shift register indexes in generated code to elide registers allocated by intermediate stack frames.
    m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size();

    // Add previously defined symbols to bookkeeping.
    m_globals.grow(symbolTable->size());
    SymbolTable::iterator end = symbolTable->end();
    for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it)
        registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset);

    // Coalesce the property additions below into batched transitions (see
    // BatchedTransitionOptimizer); active for the rest of this constructor.
    BatchedTransitionOptimizer optimizer(globalObject);

    const VarStack& varStack = programNode->varStack();
    const FunctionStack& functionStack = programNode->functionStack();
    bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals();
    if (canOptimizeNewGlobals) {
        // Shift new symbols so they get stored prior to existing symbols.
        m_nextGlobalIndex -= symbolTable->size();

        for (size_t i = 0; i < functionStack.size(); ++i) {
            FuncDeclNode* funcDecl = functionStack[i];
            globalObject->removeDirect(funcDecl->m_ident); // Make sure our new function is not shadowed by an old property.
            emitNewFunction(addGlobalVar(funcDecl->m_ident, false), funcDecl);
        }

        // Collect vars not already present on the global object...
        Vector<RegisterID*, 32> newVars;
        for (size_t i = 0; i < varStack.size(); ++i)
            if (!globalObject->hasProperty(exec, varStack[i].first))
                newVars.append(addGlobalVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant));

        preserveLastVar();

        // ...and initialize them to undefined.
        for (size_t i = 0; i < newVars.size(); ++i)
            emitLoad(newVars[i], jsUndefined());
    } else {
        // Too many globals to keep in registers: declare them as plain
        // global-object properties instead.
        for (size_t i = 0; i < functionStack.size(); ++i) {
            FuncDeclNode* funcDecl = functionStack[i];
            globalObject->putWithAttributes(exec, funcDecl->m_ident, funcDecl->makeFunction(exec, scopeChain.node()), DontDelete);
        }
        for (size_t i = 0; i < varStack.size(); ++i) {
            if (globalObject->hasProperty(exec, varStack[i].first))
                continue;
            int attributes = DontDelete;
            if (varStack[i].second & DeclarationStacks::IsConstant)
                attributes |= ReadOnly;
            globalObject->putWithAttributes(exec, varStack[i].first, jsUndefined(), attributes);
        }

        preserveLastVar();
    }
}
290
// Constructs a generator for function code. Declares the "arguments"
// register and activation (when needed), registers declared functions and
// vars as locals, then lays out "this" and the parameters at negative
// register indexes per the calling convention described at the top of
// this file.
BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock)
    : m_shouldEmitDebugHooks(!!debugger)
    , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
    , m_scopeChain(&scopeChain)
    , m_symbolTable(symbolTable)
    , m_scopeNode(functionBody)
    , m_codeBlock(codeBlock)
    , m_finallyDepth(0)
    , m_dynamicScopeDepth(0)
    , m_baseScopeDepth(0)
    , m_codeType(FunctionCode)
    , m_nextConstantOffset(0)
    , m_globalConstantIndex(0)
    , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
    , m_lastOpcodeID(op_end)
    , m_emitNodeDepth(0)
    , m_regeneratingForExceptionInfo(false)
    , m_codeBlockBeingRegeneratedFrom(0)
{
    if (m_shouldEmitDebugHooks)
        m_codeBlock->setNeedsFullScopeChain(true);

    codeBlock->setGlobalData(m_globalData);

    bool usesArguments = functionBody->usesArguments();
    codeBlock->setUsesArguments(usesArguments);
    if (usesArguments) {
        // "arguments" lives in a dedicated call-frame slot and is also
        // declared as a local symbol.
        m_argumentsRegister.setIndex(RegisterFile::OptionalCalleeArguments);
        addVar(propertyNames().arguments, false);
    }

    if (m_codeBlock->needsFullScopeChain()) {
        // Reserve a register for the activation object and enter through
        // the activation-creating opcode.
        ++m_codeBlock->m_numVars;
        m_activationRegisterIndex = newRegister()->index();
        emitOpcode(op_enter_with_activation);
        instructions().append(m_activationRegisterIndex);
    } else
        emitOpcode(op_enter);

    if (usesArguments) {
        emitOpcode(op_init_arguments);

        // The debugger currently retrieves the arguments object from an activation rather than pulling
        // it from a call frame. In the long-term it should stop doing that (<rdar://problem/6911886>),
        // but for now we force eager creation of the arguments object when debugging.
        if (m_shouldEmitDebugHooks)
            emitOpcode(op_create_arguments);
    }

    // Declare nested function declarations; m_functions tracks their names
    // so parameters of the same name do not overwrite them (see addParameter).
    const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
    for (size_t i = 0; i < functionStack.size(); ++i) {
        FuncDeclNode* funcDecl = functionStack[i];
        const Identifier& ident = funcDecl->m_ident;
        m_functions.add(ident.ustring().rep());
        emitNewFunction(addVar(ident, false), funcDecl);
    }

    // Declare all vars as locals.
    const DeclarationStacks::VarStack& varStack = functionBody->varStack();
    for (size_t i = 0; i < varStack.size(); ++i)
        addVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);

    // Parameters (and "this") occupy negative indexes below the call frame
    // header, per the register layout described at the top of this file.
    const Identifier* parameters = functionBody->parameters();
    size_t parameterCount = functionBody->parameterCount();
    m_nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1;
    m_parameters.grow(1 + parameterCount); // reserve space for "this"

    // Add "this" as a parameter
    m_thisRegister.setIndex(m_nextParameterIndex);
    ++m_nextParameterIndex;
    ++m_codeBlock->m_numParameters;

    if (functionBody->usesThis() || m_shouldEmitDebugHooks) {
        // Normalize "this" on entry (op_convert_this) since the body observes it.
        emitOpcode(op_convert_this);
        instructions().append(m_thisRegister.index());
    }

    for (size_t i = 0; i < parameterCount; ++i)
        addParameter(parameters[i]);

    preserveLastVar();
}
372
// Constructs a generator for eval code. Eval code keeps its symbol table
// (see generate()) and needs a full scope chain when debugging or when it
// begins at a non-zero base scope depth.
BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock)
    : m_shouldEmitDebugHooks(!!debugger)
    , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
    , m_scopeChain(&scopeChain)
    , m_symbolTable(symbolTable)
    , m_scopeNode(evalNode)
    , m_codeBlock(codeBlock)
    , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
    , m_finallyDepth(0)
    , m_dynamicScopeDepth(0)
    , m_baseScopeDepth(codeBlock->baseScopeDepth())
    , m_codeType(EvalCode)
    , m_nextConstantOffset(0)
    , m_globalConstantIndex(0)
    , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
    , m_lastOpcodeID(op_end)
    , m_emitNodeDepth(0)
    , m_regeneratingForExceptionInfo(false)
    , m_codeBlockBeingRegeneratedFrom(0)
{
    if (m_shouldEmitDebugHooks || m_baseScopeDepth)
        m_codeBlock->setNeedsFullScopeChain(true);

    emitOpcode(op_enter);
    codeBlock->setGlobalData(m_globalData);
    m_codeBlock->m_numParameters = 1; // Allocate space for "this"

    preserveLastVar();
}
402
addParameter(const Identifier & ident)403 RegisterID* BytecodeGenerator::addParameter(const Identifier& ident)
404 {
405 // Parameters overwrite var declarations, but not function declarations.
406 RegisterID* result = 0;
407 UString::Rep* rep = ident.ustring().rep();
408 if (!m_functions.contains(rep)) {
409 symbolTable().set(rep, m_nextParameterIndex);
410 RegisterID& parameter = registerFor(m_nextParameterIndex);
411 parameter.setIndex(m_nextParameterIndex);
412 result = ¶meter;
413 }
414
415 // To maintain the calling convention, we have to allocate unique space for
416 // each parameter, even if the parameter doesn't make it into the symbol table.
417 ++m_nextParameterIndex;
418 ++m_codeBlock->m_numParameters;
419 return result;
420 }
421
registerFor(const Identifier & ident)422 RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
423 {
424 if (ident == propertyNames().thisIdentifier)
425 return &m_thisRegister;
426
427 if (!shouldOptimizeLocals())
428 return 0;
429
430 SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
431 if (entry.isNull())
432 return 0;
433
434 if (ident == propertyNames().arguments)
435 createArgumentsIfNecessary();
436
437 return ®isterFor(entry.getIndex());
438 }
439
willResolveToArguments(const Identifier & ident)440 bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
441 {
442 if (ident != propertyNames().arguments)
443 return false;
444
445 if (!shouldOptimizeLocals())
446 return false;
447
448 SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
449 if (entry.isNull())
450 return false;
451
452 if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
453 return true;
454
455 return false;
456 }
457
uncheckedRegisterForArguments()458 RegisterID* BytecodeGenerator::uncheckedRegisterForArguments()
459 {
460 ASSERT(willResolveToArguments(propertyNames().arguments));
461
462 SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.ustring().rep());
463 ASSERT(!entry.isNull());
464 return ®isterFor(entry.getIndex());
465 }
466
constRegisterFor(const Identifier & ident)467 RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
468 {
469 if (m_codeType == EvalCode)
470 return 0;
471
472 SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
473 ASSERT(!entry.isNull());
474
475 return ®isterFor(entry.getIndex());
476 }
477
isLocal(const Identifier & ident)478 bool BytecodeGenerator::isLocal(const Identifier& ident)
479 {
480 if (ident == propertyNames().thisIdentifier)
481 return true;
482
483 return shouldOptimizeLocals() && symbolTable().contains(ident.ustring().rep());
484 }
485
isLocalConstant(const Identifier & ident)486 bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
487 {
488 return symbolTable().get(ident.ustring().rep()).isReadOnly();
489 }
490
newRegister()491 RegisterID* BytecodeGenerator::newRegister()
492 {
493 m_calleeRegisters.append(m_calleeRegisters.size());
494 m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
495 return &m_calleeRegisters.last();
496 }
497
newTemporary()498 RegisterID* BytecodeGenerator::newTemporary()
499 {
500 // Reclaim free register IDs.
501 while (m_calleeRegisters.size() && !m_calleeRegisters.last().refCount())
502 m_calleeRegisters.removeLast();
503
504 RegisterID* result = newRegister();
505 result->setTemporary();
506 return result;
507 }
508
highestUsedRegister()509 RegisterID* BytecodeGenerator::highestUsedRegister()
510 {
511 size_t count = m_codeBlock->m_numCalleeRegisters;
512 while (m_calleeRegisters.size() < count)
513 newRegister();
514 return &m_calleeRegisters.last();
515 }
516
// Allocates a label scope (for break/continue resolution). Scopes whose
// refCount dropped to zero are popped first so storage is reused in
// stack order, mirroring newLabel()/newTemporary().
PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
{
    // Reclaim free label scopes.
    while (m_labelScopes.size() && !m_labelScopes.last().refCount())
        m_labelScopes.removeLast();

    // Allocate new label scope.
    LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : PassRefPtr<Label>()); // Only loops have continue targets.
    m_labelScopes.append(scope);
    return &m_labelScopes.last();
}
528
newLabel()529 PassRefPtr<Label> BytecodeGenerator::newLabel()
530 {
531 // Reclaim free label IDs.
532 while (m_labels.size() && !m_labels.last().refCount())
533 m_labels.removeLast();
534
535 // Allocate new label ID.
536 m_labels.append(m_codeBlock);
537 return &m_labels.last();
538 }
539
// Binds |l0| to the current instruction offset and records that offset as
// a jump target on the CodeBlock. Emitting a label also resets the
// peephole state, since an instruction that is a jump target must not be
// fused with its predecessor.
PassRefPtr<Label> BytecodeGenerator::emitLabel(Label* l0)
{
    unsigned newLabelIndex = instructions().size();
    l0->setLocation(newLabelIndex);

    if (m_codeBlock->numberOfJumpTargets()) {
        unsigned lastLabelIndex = m_codeBlock->lastJumpTarget();
        ASSERT(lastLabelIndex <= newLabelIndex);
        if (newLabelIndex == lastLabelIndex) {
            // Peephole optimizations have already been disabled by emitting the last label
            return l0;
        }
    }

    m_codeBlock->addJumpTarget(newLabelIndex);

    // This disables peephole optimizations when an instruction is a jump target
    m_lastOpcodeID = op_end;
    return l0;
}
560
emitOpcode(OpcodeID opcodeID)561 void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
562 {
563 instructions().append(globalData()->interpreter->getOpcode(opcodeID));
564 m_lastOpcodeID = opcodeID;
565 }
566
retrieveLastBinaryOp(int & dstIndex,int & src1Index,int & src2Index)567 void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
568 {
569 ASSERT(instructions().size() >= 4);
570 size_t size = instructions().size();
571 dstIndex = instructions().at(size - 3).u.operand;
572 src1Index = instructions().at(size - 2).u.operand;
573 src2Index = instructions().at(size - 1).u.operand;
574 }
575
retrieveLastUnaryOp(int & dstIndex,int & srcIndex)576 void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex)
577 {
578 ASSERT(instructions().size() >= 3);
579 size_t size = instructions().size();
580 dstIndex = instructions().at(size - 2).u.operand;
581 srcIndex = instructions().at(size - 1).u.operand;
582 }
583
rewindBinaryOp()584 void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp()
585 {
586 ASSERT(instructions().size() >= 4);
587 instructions().shrink(instructions().size() - 4);
588 }
589
rewindUnaryOp()590 void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
591 {
592 ASSERT(instructions().size() >= 3);
593 instructions().shrink(instructions().size() - 3);
594 }
595
emitJump(Label * target)596 PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
597 {
598 emitOpcode(target->isForward() ? op_jmp : op_loop);
599 instructions().append(target->offsetFrom(instructions().size()));
600 return target;
601 }
602
// Emits a jump to |target| taken when |cond| is true.
//
// Peephole optimization: when |cond| is a dead temporary produced by the
// immediately preceding instruction, that instruction is rewound and fused
// with the jump:
//   op_less / op_lesseq + backward jump -> op_loop_if_less / op_loop_if_lesseq
//   op_eq_null / op_neq_null + forward jump -> op_jeq_null / op_jneq_null
// Otherwise a plain op_jtrue (forward) or op_loop_if_true (backward) is used.
PassRefPtr<Label> BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label* target)
{
    if (m_lastOpcodeID == op_less && !target->isForward()) {
        int dstIndex;
        int src1Index;
        int src2Index;

        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);

        // Fuse only if the compare result is unobservable elsewhere:
        // cond is exactly the dead temporary that holds it.
        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindBinaryOp();
            emitOpcode(op_loop_if_less);
            instructions().append(src1Index);
            instructions().append(src2Index);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_lesseq && !target->isForward()) {
        int dstIndex;
        int src1Index;
        int src2Index;

        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);

        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindBinaryOp();
            emitOpcode(op_loop_if_lesseq);
            instructions().append(src1Index);
            instructions().append(src2Index);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_eq_null && target->isForward()) {
        int dstIndex;
        int srcIndex;

        retrieveLastUnaryOp(dstIndex, srcIndex);

        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindUnaryOp();
            emitOpcode(op_jeq_null);
            instructions().append(srcIndex);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_neq_null && target->isForward()) {
        int dstIndex;
        int srcIndex;

        retrieveLastUnaryOp(dstIndex, srcIndex);

        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindUnaryOp();
            emitOpcode(op_jneq_null);
            instructions().append(srcIndex);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    }

    // Generic path: test cond directly.
    emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
    instructions().append(cond->index());
    instructions().append(target->offsetFrom(instructions().size()));
    return target;
}
668
// Emits a jump to |target| taken when |cond| is false. Only forward
// targets are supported (asserted).
//
// Peephole optimization: when |cond| is a dead temporary produced by the
// immediately preceding instruction, that instruction is rewound and fused
// with an inverted jump:
//   op_less -> op_jnless, op_lesseq -> op_jnlesseq,
//   op_not -> op_jtrue on the negated operand,
//   op_eq_null -> op_jneq_null, op_neq_null -> op_jeq_null.
// Otherwise a plain op_jfalse is emitted.
PassRefPtr<Label> BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label* target)
{
    ASSERT(target->isForward());

    if (m_lastOpcodeID == op_less) {
        int dstIndex;
        int src1Index;
        int src2Index;

        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);

        // Fuse only if the compare result is unobservable elsewhere:
        // cond is exactly the dead temporary that holds it.
        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindBinaryOp();
            emitOpcode(op_jnless);
            instructions().append(src1Index);
            instructions().append(src2Index);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_lesseq) {
        int dstIndex;
        int src1Index;
        int src2Index;

        retrieveLastBinaryOp(dstIndex, src1Index, src2Index);

        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindBinaryOp();
            emitOpcode(op_jnlesseq);
            instructions().append(src1Index);
            instructions().append(src2Index);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_not) {
        int dstIndex;
        int srcIndex;

        retrieveLastUnaryOp(dstIndex, srcIndex);

        // !x is false exactly when x is true, so jump-if-true on x.
        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindUnaryOp();
            emitOpcode(op_jtrue);
            instructions().append(srcIndex);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_eq_null) {
        int dstIndex;
        int srcIndex;

        retrieveLastUnaryOp(dstIndex, srcIndex);

        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindUnaryOp();
            emitOpcode(op_jneq_null);
            instructions().append(srcIndex);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    } else if (m_lastOpcodeID == op_neq_null) {
        int dstIndex;
        int srcIndex;

        retrieveLastUnaryOp(dstIndex, srcIndex);

        if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
            rewindUnaryOp();
            emitOpcode(op_jeq_null);
            instructions().append(srcIndex);
            instructions().append(target->offsetFrom(instructions().size()));
            return target;
        }
    }

    // Generic path: test cond directly.
    emitOpcode(op_jfalse);
    instructions().append(cond->index());
    instructions().append(target->offsetFrom(instructions().size()));
    return target;
}
749
emitJumpIfNotFunctionCall(RegisterID * cond,Label * target)750 PassRefPtr<Label> BytecodeGenerator::emitJumpIfNotFunctionCall(RegisterID* cond, Label* target)
751 {
752 emitOpcode(op_jneq_ptr);
753 instructions().append(cond->index());
754 instructions().append(m_scopeChain->globalObject()->d()->callFunction);
755 instructions().append(target->offsetFrom(instructions().size()));
756 return target;
757 }
758
emitJumpIfNotFunctionApply(RegisterID * cond,Label * target)759 PassRefPtr<Label> BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond, Label* target)
760 {
761 emitOpcode(op_jneq_ptr);
762 instructions().append(cond->index());
763 instructions().append(m_scopeChain->globalObject()->d()->applyFunction);
764 instructions().append(target->offsetFrom(instructions().size()));
765 return target;
766 }
767
addConstant(FuncDeclNode * n)768 unsigned BytecodeGenerator::addConstant(FuncDeclNode* n)
769 {
770 // No need to explicitly unique function body nodes -- they're unique already.
771 return m_codeBlock->addFunction(n);
772 }
773
addConstant(FuncExprNode * n)774 unsigned BytecodeGenerator::addConstant(FuncExprNode* n)
775 {
776 // No need to explicitly unique function expression nodes -- they're unique already.
777 return m_codeBlock->addFunctionExpression(n);
778 }
779
addConstant(const Identifier & ident)780 unsigned BytecodeGenerator::addConstant(const Identifier& ident)
781 {
782 UString::Rep* rep = ident.ustring().rep();
783 pair<IdentifierMap::iterator, bool> result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
784 if (result.second) // new entry
785 m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
786
787 return result.first->second;
788 }
789
// Returns the constant-pool register holding |v|, adding the value to the
// pool on first use. Identical (encoded-equal) JSValues share one register.
RegisterID* BytecodeGenerator::addConstantValue(JSValue v)
{
    int index = m_nextConstantOffset;

    // Deduplicate via the encoded value -> pool offset map.
    pair<JSValueMap::iterator, bool> result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset);
    if (result.second) {
        // New constant: create its pool register and store the value on the CodeBlock.
        m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset);
        ++m_nextConstantOffset;
        m_codeBlock->addConstantRegister(JSValue(v));
    } else
        index = result.first->second;

    return &m_constantPoolRegisters[index];
}
804
addRegExp(RegExp * r)805 unsigned BytecodeGenerator::addRegExp(RegExp* r)
806 {
807 return m_codeBlock->addRegExp(r);
808 }
809
emitMove(RegisterID * dst,RegisterID * src)810 RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
811 {
812 emitOpcode(op_mov);
813 instructions().append(dst->index());
814 instructions().append(src->index());
815 return dst;
816 }
817
emitUnaryOp(OpcodeID opcodeID,RegisterID * dst,RegisterID * src)818 RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src)
819 {
820 emitOpcode(opcodeID);
821 instructions().append(dst->index());
822 instructions().append(src->index());
823 return dst;
824 }
825
emitPreInc(RegisterID * srcDst)826 RegisterID* BytecodeGenerator::emitPreInc(RegisterID* srcDst)
827 {
828 emitOpcode(op_pre_inc);
829 instructions().append(srcDst->index());
830 return srcDst;
831 }
832
emitPreDec(RegisterID * srcDst)833 RegisterID* BytecodeGenerator::emitPreDec(RegisterID* srcDst)
834 {
835 emitOpcode(op_pre_dec);
836 instructions().append(srcDst->index());
837 return srcDst;
838 }
839
emitPostInc(RegisterID * dst,RegisterID * srcDst)840 RegisterID* BytecodeGenerator::emitPostInc(RegisterID* dst, RegisterID* srcDst)
841 {
842 emitOpcode(op_post_inc);
843 instructions().append(dst->index());
844 instructions().append(srcDst->index());
845 return dst;
846 }
847
emitPostDec(RegisterID * dst,RegisterID * srcDst)848 RegisterID* BytecodeGenerator::emitPostDec(RegisterID* dst, RegisterID* srcDst)
849 {
850 emitOpcode(op_post_dec);
851 instructions().append(dst->index());
852 instructions().append(srcDst->index());
853 return dst;
854 }
855
// Emits a generic two-operand opcode of the form: opcode dst, src1, src2.
// The listed arithmetic/bitwise opcodes additionally carry an OperandTypes
// word encoding statically predicted operand types for later stages.
RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
{
    emitOpcode(opcodeID);
    instructions().append(dst->index());
    instructions().append(src1->index());
    instructions().append(src2->index());

    // Only these opcodes have a fourth, type-hint operand; all other binary
    // opcodes are three operands wide.
    if (opcodeID == op_bitor || opcodeID == op_bitand || opcodeID == op_bitxor ||
        opcodeID == op_add || opcodeID == op_mul || opcodeID == op_sub || opcodeID == op_div)
        instructions().append(types.toInt());

    return dst;
}
869
emitEqualityOp(OpcodeID opcodeID,RegisterID * dst,RegisterID * src1,RegisterID * src2)870 RegisterID* BytecodeGenerator::emitEqualityOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2)
871 {
872 if (m_lastOpcodeID == op_typeof) {
873 int dstIndex;
874 int srcIndex;
875
876 retrieveLastUnaryOp(dstIndex, srcIndex);
877
878 if (src1->index() == dstIndex
879 && src1->isTemporary()
880 && m_codeBlock->isConstantRegisterIndex(src2->index())
881 && m_codeBlock->constantRegister(src2->index()).jsValue().isString()) {
882 const UString& value = asString(m_codeBlock->constantRegister(src2->index()).jsValue())->value();
883 if (value == "undefined") {
884 rewindUnaryOp();
885 emitOpcode(op_is_undefined);
886 instructions().append(dst->index());
887 instructions().append(srcIndex);
888 return dst;
889 }
890 if (value == "boolean") {
891 rewindUnaryOp();
892 emitOpcode(op_is_boolean);
893 instructions().append(dst->index());
894 instructions().append(srcIndex);
895 return dst;
896 }
897 if (value == "number") {
898 rewindUnaryOp();
899 emitOpcode(op_is_number);
900 instructions().append(dst->index());
901 instructions().append(srcIndex);
902 return dst;
903 }
904 if (value == "string") {
905 rewindUnaryOp();
906 emitOpcode(op_is_string);
907 instructions().append(dst->index());
908 instructions().append(srcIndex);
909 return dst;
910 }
911 if (value == "object") {
912 rewindUnaryOp();
913 emitOpcode(op_is_object);
914 instructions().append(dst->index());
915 instructions().append(srcIndex);
916 return dst;
917 }
918 if (value == "function") {
919 rewindUnaryOp();
920 emitOpcode(op_is_function);
921 instructions().append(dst->index());
922 instructions().append(srcIndex);
923 return dst;
924 }
925 }
926 }
927
928 emitOpcode(opcodeID);
929 instructions().append(dst->index());
930 instructions().append(src1->index());
931 instructions().append(src2->index());
932 return dst;
933 }
934
// Loads the boolean constant 'b' into dst (via the constant-value path).
RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, bool b)
{
    return emitLoad(dst, jsBoolean(b));
}
939
emitLoad(RegisterID * dst,double number)940 RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, double number)
941 {
942 // FIXME: Our hash tables won't hold infinity, so we make a new JSNumberCell each time.
943 // Later we can do the extra work to handle that like the other cases.
944 if (number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
945 return emitLoad(dst, jsNumber(globalData(), number));
946 JSValue& valueInMap = m_numberMap.add(number, JSValue()).first->second;
947 if (!valueInMap)
948 valueInMap = jsNumber(globalData(), number);
949 return emitLoad(dst, valueInMap);
950 }
951
emitLoad(RegisterID * dst,const Identifier & identifier)952 RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
953 {
954 JSString*& stringInMap = m_stringMap.add(identifier.ustring().rep(), 0).first->second;
955 if (!stringInMap)
956 stringInMap = jsOwnedString(globalData(), identifier.ustring());
957 return emitLoad(dst, JSValue(stringInMap));
958 }
959
emitLoad(RegisterID * dst,JSValue v)960 RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, JSValue v)
961 {
962 RegisterID* constantID = addConstantValue(v);
963 if (dst)
964 return emitMove(dst, constantID);
965 return constantID;
966 }
967
// Attempts to statically resolve 'property' against the scope chain.
// Returns true on success, setting 'index' (the register index within the
// owning variable object) and 'stackDepth' (how many scopes to skip).
// Returns false when the lookup must happen dynamically; then 'index' is
// missingSymbolMarker(). 'globalObject' is set (non-null) when the search
// ended at the outermost scope, letting callers emit global-specific ops.
bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, JSObject*& globalObject)
{
    // Cases where we cannot statically optimize the lookup.
    if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
        stackDepth = 0;
        index = missingSymbolMarker();

        if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
            // Global code with only the global object on the chain: still
            // report the global object so the caller can use it as a base.
            ScopeChainIterator iter = m_scopeChain->begin();
            globalObject = *iter;
            ASSERT((++iter) == m_scopeChain->end());
        }
        return false;
    }

    size_t depth = 0;

    ScopeChainIterator iter = m_scopeChain->begin();
    ScopeChainIterator end = m_scopeChain->end();
    for (; iter != end; ++iter, ++depth) {
        JSObject* currentScope = *iter;
        // A non-variable object (e.g. from 'with') ends static analysis.
        if (!currentScope->isVariableObject())
            break;
        JSVariableObject* currentVariableObject = static_cast<JSVariableObject*>(currentScope);
        SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.ustring().rep());

        // Found the property
        if (!entry.isNull()) {
            if (entry.isReadOnly() && forWriting) {
                // Writing to a read-only property: fall back to the dynamic
                // path, which handles the failed write appropriately.
                stackDepth = 0;
                index = missingSymbolMarker();
                if (++iter == end)
                    globalObject = currentVariableObject;
                return false;
            }
            stackDepth = depth;
            index = entry.getIndex();
            // Report the scope as the global object only if it is outermost.
            if (++iter == end)
                globalObject = currentVariableObject;
            return true;
        }
        // Dynamic scopes (e.g. an eval'd function's scope) can gain
        // properties at runtime, so we cannot skip past them statically.
        if (currentVariableObject->isDynamicScope())
            break;
    }

    // Can't locate the property but we're able to avoid a few lookups.
    stackDepth = depth;
    index = missingSymbolMarker();
    JSObject* scope = *iter;
    if (++iter == end)
        globalObject = scope;
    return true;
}
1021
// Emits op_instanceof: dst = value instanceof base, with base's prototype
// pre-loaded into basePrototype by the caller.
RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
{
    emitOpcode(op_instanceof);
    instructions().append(dst->index());
    instructions().append(value->index());
    instructions().append(base->index());
    instructions().append(basePrototype->index());
    return dst;
}
1031
// Resolves 'property' against the scope chain and loads it into dst,
// choosing the cheapest opcode static analysis allows:
//   - op_resolve: fully dynamic lookup (no static information).
//   - direct scoped/global var access when the slot index is known.
//   - op_resolve_global: dynamic lookup on the global object, with two
//     trailing zeroed operands reserved as an inline lookup cache.
//   - op_resolve_skip: dynamic lookup that skips 'depth' inner scopes.
RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
{
    size_t depth = 0;
    int index = 0;
    JSObject* globalObject = 0;
    if (!findScopedProperty(property, index, depth, false, globalObject) && !globalObject) {
        // We can't optimise at all :-(
        emitOpcode(op_resolve);
        instructions().append(dst->index());
        instructions().append(addConstant(property));
        return dst;
    }

    if (globalObject) {
        bool forceGlobalResolve = false;
        if (m_regeneratingForExceptionInfo) {
            // When regenerating bytecode for exception info, the opcode at
            // each offset must match the original code block's layout, so a
            // global resolve is forced wherever the original had one.
#if ENABLE(JIT)
            forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
#else
            forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
#endif
        }

        if (index != missingSymbolMarker() && !forceGlobalResolve) {
            // Directly index the property lookup across multiple scopes.
            return emitGetScopedVar(dst, depth, index, globalObject);
        }

#if ENABLE(JIT)
        m_codeBlock->addGlobalResolveInfo(instructions().size());
#else
        m_codeBlock->addGlobalResolveInstruction(instructions().size());
#endif
        emitOpcode(op_resolve_global);
        instructions().append(dst->index());
        instructions().append(globalObject);
        instructions().append(addConstant(property));
        // Two zeroed operands: inline cache slots filled in at runtime.
        instructions().append(0);
        instructions().append(0);
        return dst;
    }

    if (index != missingSymbolMarker()) {
        // Directly index the property lookup across multiple scopes.
        return emitGetScopedVar(dst, depth, index, globalObject);
    }

    // In this case we are at least able to drop a few scope chains from the
    // lookup chain, although we still need to hash from then on.
    emitOpcode(op_resolve_skip);
    instructions().append(dst->index());
    instructions().append(addConstant(property));
    instructions().append(depth);
    return dst;
}
1087
emitGetScopedVar(RegisterID * dst,size_t depth,int index,JSValue globalObject)1088 RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValue globalObject)
1089 {
1090 if (globalObject) {
1091 emitOpcode(op_get_global_var);
1092 instructions().append(dst->index());
1093 instructions().append(asCell(globalObject));
1094 instructions().append(index);
1095 return dst;
1096 }
1097
1098 emitOpcode(op_get_scoped_var);
1099 instructions().append(dst->index());
1100 instructions().append(index);
1101 instructions().append(depth);
1102 return dst;
1103 }
1104
emitPutScopedVar(size_t depth,int index,RegisterID * value,JSValue globalObject)1105 RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, RegisterID* value, JSValue globalObject)
1106 {
1107 if (globalObject) {
1108 emitOpcode(op_put_global_var);
1109 instructions().append(asCell(globalObject));
1110 instructions().append(index);
1111 instructions().append(value->index());
1112 return value;
1113 }
1114 emitOpcode(op_put_scoped_var);
1115 instructions().append(index);
1116 instructions().append(depth);
1117 instructions().append(value->index());
1118 return value;
1119 }
1120
emitResolveBase(RegisterID * dst,const Identifier & property)1121 RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const Identifier& property)
1122 {
1123 size_t depth = 0;
1124 int index = 0;
1125 JSObject* globalObject = 0;
1126 findScopedProperty(property, index, depth, false, globalObject);
1127 if (!globalObject) {
1128 // We can't optimise at all :-(
1129 emitOpcode(op_resolve_base);
1130 instructions().append(dst->index());
1131 instructions().append(addConstant(property));
1132 return dst;
1133 }
1134
1135 // Global object is the base
1136 return emitLoad(dst, JSValue(globalObject));
1137 }
1138
// Resolves 'property' and produces both its base object (into baseDst) and
// its value (into propDst). Falls back to the fully dynamic
// op_resolve_with_base unless the property is known to live on the global
// object, in which case the global object is loaded as the base and the
// value is fetched directly or via a cached op_resolve_global.
RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
{
    size_t depth = 0;
    int index = 0;
    JSObject* globalObject = 0;
    if (!findScopedProperty(property, index, depth, false, globalObject) || !globalObject) {
        // We can't optimise at all :-(
        emitOpcode(op_resolve_with_base);
        instructions().append(baseDst->index());
        instructions().append(propDst->index());
        instructions().append(addConstant(property));
        return baseDst;
    }

    bool forceGlobalResolve = false;
    if (m_regeneratingForExceptionInfo) {
        // When regenerating for exception info, the opcode layout must match
        // the original code block, so mirror its global resolves.
#if ENABLE(JIT)
        forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
#else
        forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
#endif
    }

    // Global object is the base
    emitLoad(baseDst, JSValue(globalObject));

    if (index != missingSymbolMarker() && !forceGlobalResolve) {
        // Directly index the property lookup across multiple scopes.
        emitGetScopedVar(propDst, depth, index, globalObject);
        return baseDst;
    }

#if ENABLE(JIT)
    m_codeBlock->addGlobalResolveInfo(instructions().size());
#else
    m_codeBlock->addGlobalResolveInstruction(instructions().size());
#endif
    emitOpcode(op_resolve_global);
    instructions().append(propDst->index());
    instructions().append(globalObject);
    instructions().append(addConstant(property));
    // Two zeroed operands: inline cache slots filled in at runtime.
    instructions().append(0);
    instructions().append(0);
    return baseDst;
}
1184
// Emits op_method_check, a marker opcode preceding a method load.
// NOTE(review): presumably consumed by the JIT to optimize the following
// get_by_id as a method access — verify against the opcode's handler.
void BytecodeGenerator::emitMethodCheck()
{
    emitOpcode(op_method_check);
}
1189
// Emits op_get_by_id: dst = base.property. The four trailing zeroed
// operands are reserved as an inline property-access cache, patched at
// runtime. The matching stub/instruction bookkeeping is registered with
// the code block before the opcode is emitted.
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
#if ENABLE(JIT)
    m_codeBlock->addStructureStubInfo(StructureStubInfo(access_get_by_id));
#else
    m_codeBlock->addPropertyAccessInstruction(instructions().size());
#endif

    emitOpcode(op_get_by_id);
    instructions().append(dst->index());
    instructions().append(base->index());
    instructions().append(addConstant(property));
    // Four zeroed operands: inline cache slots.
    instructions().append(0);
    instructions().append(0);
    instructions().append(0);
    instructions().append(0);
    return dst;
}
1208
// Emits op_put_by_id: base.property = value. The four trailing zeroed
// operands are reserved as an inline property-access cache, patched at
// runtime. The matching stub/instruction bookkeeping is registered with
// the code block before the opcode is emitted.
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
#if ENABLE(JIT)
    m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
#else
    m_codeBlock->addPropertyAccessInstruction(instructions().size());
#endif

    emitOpcode(op_put_by_id);
    instructions().append(base->index());
    instructions().append(addConstant(property));
    instructions().append(value->index());
    // Four zeroed operands: inline cache slots.
    instructions().append(0);
    instructions().append(0);
    instructions().append(0);
    instructions().append(0);
    return value;
}
1227
// Emits op_put_getter: installs 'value' as the getter for base.property.
RegisterID* BytecodeGenerator::emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value)
{
    emitOpcode(op_put_getter);
    instructions().append(base->index());
    instructions().append(addConstant(property));
    instructions().append(value->index());
    return value;
}
1236
// Emits op_put_setter: installs 'value' as the setter for base.property.
RegisterID* BytecodeGenerator::emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value)
{
    emitOpcode(op_put_setter);
    instructions().append(base->index());
    instructions().append(addConstant(property));
    instructions().append(value->index());
    return value;
}
1245
// Emits op_del_by_id: dst = delete base.property.
RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
    emitOpcode(op_del_by_id);
    instructions().append(dst->index());
    instructions().append(base->index());
    instructions().append(addConstant(property));
    return dst;
}
1254
// Emits op_get_by_val: dst = base[property], with a computed property key.
RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
{
    emitOpcode(op_get_by_val);
    instructions().append(dst->index());
    instructions().append(base->index());
    instructions().append(property->index());
    return dst;
}
1263
// Emits op_put_by_val: base[property] = value, with a computed property key.
RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
{
    emitOpcode(op_put_by_val);
    instructions().append(base->index());
    instructions().append(property->index());
    instructions().append(value->index());
    return value;
}
1272
// Emits op_del_by_val: dst = delete base[property], with a computed key.
RegisterID* BytecodeGenerator::emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
{
    emitOpcode(op_del_by_val);
    instructions().append(dst->index());
    instructions().append(base->index());
    instructions().append(property->index());
    return dst;
}
1281
// Emits op_put_by_index: base[index] = value, where 'index' is a compile-
// time-known unsigned array index.
RegisterID* BytecodeGenerator::emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value)
{
    emitOpcode(op_put_by_index);
    instructions().append(base->index());
    instructions().append(index);
    instructions().append(value->index());
    return value;
}
1290
// Emits op_new_object: dst = a fresh empty object.
RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst)
{
    emitOpcode(op_new_object);
    instructions().append(dst->index());
    return dst;
}
1297
// Emits op_new_array: dst = a new array initialized from the leading run
// of non-elided elements. Elements are evaluated into consecutive freshly
// allocated temporaries, since the opcode takes (first register, count).
// NOTE(review): generation stops at the first elision; elements after it
// are presumably filled in by the caller — verify against ArrayNode codegen.
RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements)
{
    Vector<RefPtr<RegisterID>, 16> argv;
    for (ElementNode* n = elements; n; n = n->next()) {
        if (n->elision())
            break;
        argv.append(newTemporary());
        // op_new_array requires the initial values to be a sequential range of registers
        ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
        emitNode(argv.last().get(), n->value());
    }
    emitOpcode(op_new_array);
    instructions().append(dst->index());
    instructions().append(argv.size() ? argv[0]->index() : 0); // argv
    instructions().append(argv.size()); // argc
    return dst;
}
1315
// Emits op_new_func: dst = a function object created from the declaration
// node registered in the constant pool.
RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FuncDeclNode* n)
{
    emitOpcode(op_new_func);
    instructions().append(dst->index());
    instructions().append(addConstant(n));
    return dst;
}
1323
// Emits op_new_regexp: dst = a RegExp object for the given compiled
// regular expression (stored in the code block's regexp table).
RegisterID* BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp)
{
    emitOpcode(op_new_regexp);
    instructions().append(dst->index());
    instructions().append(addRegExp(regExp));
    return dst;
}
1331
1332
// Emits op_new_func_exp: r0 = a function object created from the function
// expression node registered in the constant pool.
RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
{
    emitOpcode(op_new_func_exp);
    instructions().append(r0->index());
    instructions().append(addConstant(n));
    return r0;
}
1340
// Convenience wrapper: emits a plain op_call via the general call emitter.
RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
{
    return emitCall(op_call, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
}
1345
createArgumentsIfNecessary()1346 void BytecodeGenerator::createArgumentsIfNecessary()
1347 {
1348 if (m_codeBlock->usesArguments() && m_codeType == FunctionCode)
1349 emitOpcode(op_create_arguments);
1350 }
1351
// Emits an eval call. The 'arguments' object is materialized first because
// eval'd code may refer to it.
RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
{
    createArgumentsIfNecessary();
    return emitCall(op_call_eval, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
}
1357
// Emits a call (op_call or op_call_eval): evaluates 'this' and the
// arguments into a sequential register range, reserves the call frame
// header, and emits the call opcode, bracketing it with profiler hooks
// when profiling is enabled.
RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
{
    ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
    ASSERT(func->refCount());
    ASSERT(thisRegister->refCount());

    RegisterID* originalFunc = func;
    if (m_shouldEmitProfileHooks) {
        // If codegen decided to recycle func as this call's destination register,
        // we need to undo that optimization here so that func will still be around
        // for the sake of op_profile_did_call.
        if (dst == func) {
            RefPtr<RegisterID> movedThisRegister = emitMove(newTemporary(), thisRegister);
            RefPtr<RegisterID> movedFunc = emitMove(thisRegister, func);

            // releaseRef() intentionally leaks one reference on each register;
            // it is balanced by the explicit deref() calls after the call below.
            thisRegister = movedThisRegister.release().releaseRef();
            func = movedFunc.release().releaseRef();
        }
    }

    // Generate code for arguments.
    Vector<RefPtr<RegisterID>, 16> argv;
    argv.append(thisRegister);
    for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next) {
        argv.append(newTemporary());
        // op_call requires the arguments to be a sequential range of registers
        ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
        emitNode(argv.last().get(), n);
    }

    // Reserve space for call frame.
    Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
    for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
        callFrame.append(newTemporary());

    if (m_shouldEmitProfileHooks) {
        emitOpcode(op_profile_will_call);
        instructions().append(func->index());

#if ENABLE(JIT)
        m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
#endif
    }

    emitExpressionInfo(divot, startOffset, endOffset);

#if ENABLE(JIT)
    m_codeBlock->addCallLinkInfo();
#endif

    // Emit call.
    emitOpcode(opcodeID);
    instructions().append(dst->index()); // dst
    instructions().append(func->index()); // func
    instructions().append(argv.size()); // argCount
    instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset

    if (m_shouldEmitProfileHooks) {
        emitOpcode(op_profile_did_call);
        instructions().append(func->index());

        // Balance the references leaked by releaseRef() above.
        if (dst == originalFunc) {
            thisRegister->deref();
            func->deref();
        }
    }

    return dst;
}
1427
// Emits op_load_varargs: spreads the 'arguments' array for an upcoming
// varargs call and stores the resulting argument count in argCountDst.
RegisterID* BytecodeGenerator::emitLoadVarargs(RegisterID* argCountDst, RegisterID* arguments)
{
    ASSERT(argCountDst->index() < arguments->index());
    emitOpcode(op_load_varargs);
    instructions().append(argCountDst->index());
    instructions().append(arguments->index());
    return argCountDst;
}
1436
// Emits op_call_varargs: calls 'func' with a runtime-determined argument
// count (argCountRegister, produced by emitLoadVarargs), bracketed by
// profiler hooks when profiling is enabled.
RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCountRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
{
    ASSERT(func->refCount());
    ASSERT(thisRegister->refCount());
    ASSERT(dst != func);
    if (m_shouldEmitProfileHooks) {
        emitOpcode(op_profile_will_call);
        instructions().append(func->index());

#if ENABLE(JIT)
        m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
#endif
    }

    emitExpressionInfo(divot, startOffset, endOffset);

    // Emit call.
    emitOpcode(op_call_varargs);
    instructions().append(dst->index()); // dst
    instructions().append(func->index()); // func
    instructions().append(argCountRegister->index()); // arg count
    instructions().append(thisRegister->index() + RegisterFile::CallFrameHeaderSize); // initial registerOffset
    if (m_shouldEmitProfileHooks) {
        emitOpcode(op_profile_did_call);
        instructions().append(func->index());
    }
    return dst;
}
1465
// Emits a function return. Before op_ret, any activation or arguments
// object that may outlive the frame is torn off (copied out of the
// register file).
RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
{
    if (m_codeBlock->needsFullScopeChain()) {
        emitOpcode(op_tear_off_activation);
        instructions().append(m_activationRegisterIndex);
    } else if (m_codeBlock->usesArguments() && m_codeBlock->m_numParameters > 1)
        // NOTE(review): m_numParameters > 1 presumably means "has at least
        // one named parameter" (the count includes 'this') — verify.
        emitOpcode(op_tear_off_arguments);

    return emitUnaryNoDstOp(op_ret, src);
}
1476
// Emits a one-operand opcode that has no destination register (e.g.
// op_ret, op_push_scope); returns src for caller convenience.
RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* src)
{
    emitOpcode(opcodeID);
    instructions().append(src->index());
    return src;
}
1483
// Emits a 'new' expression: evaluates arguments into a sequential register
// range (slot 0 reserved for 'this'), loads func.prototype, reserves the
// call frame header, and emits op_construct followed by
// op_construct_verify, bracketed by profiler hooks when enabled.
RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
{
    ASSERT(func->refCount());

    RegisterID* originalFunc = func;
    if (m_shouldEmitProfileHooks) {
        // If codegen decided to recycle func as this call's destination register,
        // we need to undo that optimization here so that func will still be around
        // for the sake of op_profile_did_call.
        if (dst == func) {
            // releaseRef() intentionally leaks a reference, balanced by the
            // explicit deref() after the construct below.
            RefPtr<RegisterID> movedFunc = emitMove(newTemporary(), func);
            func = movedFunc.release().releaseRef();
        }
    }

    RefPtr<RegisterID> funcProto = newTemporary();

    // Generate code for arguments.
    Vector<RefPtr<RegisterID>, 16> argv;
    argv.append(newTemporary()); // reserve space for "this"
    for (ArgumentListNode* n = argumentsNode ? argumentsNode->m_listNode : 0; n; n = n->m_next) {
        argv.append(newTemporary());
        // op_construct requires the arguments to be a sequential range of registers
        ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
        emitNode(argv.last().get(), n);
    }

    if (m_shouldEmitProfileHooks) {
        emitOpcode(op_profile_will_call);
        instructions().append(func->index());
    }

    // Load prototype.
    emitExpressionInfo(divot, startOffset, endOffset);
    emitGetByIdExceptionInfo(op_construct);
    emitGetById(funcProto.get(), func, globalData()->propertyNames->prototype);

    // Reserve space for call frame.
    Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
    for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
        callFrame.append(newTemporary());

    emitExpressionInfo(divot, startOffset, endOffset);

#if ENABLE(JIT)
    m_codeBlock->addCallLinkInfo();
#endif

    emitOpcode(op_construct);
    instructions().append(dst->index()); // dst
    instructions().append(func->index()); // func
    instructions().append(argv.size()); // argCount
    instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
    instructions().append(funcProto->index()); // proto
    instructions().append(argv[0]->index()); // thisRegister

    emitOpcode(op_construct_verify);
    instructions().append(dst->index());
    instructions().append(argv[0]->index());

    if (m_shouldEmitProfileHooks) {
        emitOpcode(op_profile_did_call);
        instructions().append(func->index());

        // Balance the reference leaked by releaseRef() above.
        if (dst == originalFunc)
            func->deref();
    }

    return dst;
}
1554
// Emits op_strcat: dst = concatenation of 'count' strings held in the
// sequential register range starting at src.
RegisterID* BytecodeGenerator::emitStrcat(RegisterID* dst, RegisterID* src, int count)
{
    emitOpcode(op_strcat);
    instructions().append(dst->index());
    instructions().append(src->index());
    instructions().append(count);

    return dst;
}
1564
// Emits op_to_primitive: dst = src converted to a primitive value.
void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src)
{
    emitOpcode(op_to_primitive);
    instructions().append(dst->index());
    instructions().append(src->index());
}
1571
// Pushes 'scope' onto the dynamic scope chain (e.g. for 'with'), recording
// a non-finally control-flow context so jumps out of the block know to pop
// it. The 'arguments' object is materialized first, since code inside the
// new scope may refer to it.
RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
{
    ASSERT(scope->isTemporary());
    ControlFlowContext context;
    context.isFinallyBlock = false;
    m_scopeContextStack.append(context);
    m_dynamicScopeDepth++;
    createArgumentsIfNecessary();

    return emitUnaryNoDstOp(op_push_scope, scope);
}
1583
// Pops the innermost dynamic scope, undoing a matching emitPushScope.
// Must not be used to leave a finally context.
void BytecodeGenerator::emitPopScope()
{
    ASSERT(m_scopeContextStack.size());
    ASSERT(!m_scopeContextStack.last().isFinallyBlock);

    emitOpcode(op_pop_scope);

    m_scopeContextStack.removeLast();
    m_dynamicScopeDepth--;
}
1594
// Emits an op_debug notification (statement boundaries, call sites, etc.)
// for the debugger; a no-op when debug hooks are disabled.
void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
{
    if (!m_shouldEmitDebugHooks)
        return;
    emitOpcode(op_debug);
    instructions().append(debugHookID);
    instructions().append(firstLine);
    instructions().append(lastLine);
}
1604
pushFinallyContext(Label * target,RegisterID * retAddrDst)1605 void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst)
1606 {
1607 ControlFlowContext scope;
1608 scope.isFinallyBlock = true;
1609 FinallyContext context = { target, retAddrDst };
1610 scope.finallyContext = context;
1611 m_scopeContextStack.append(scope);
1612 m_finallyDepth++;
1613 }
1614
// Leaves the innermost finally context, undoing pushFinallyContext.
void BytecodeGenerator::popFinallyContext()
{
    ASSERT(m_scopeContextStack.size());
    ASSERT(m_scopeContextStack.last().isFinallyBlock);
    ASSERT(m_finallyDepth > 0);
    m_scopeContextStack.removeLast();
    m_finallyDepth--;
}
1623
// Returns the label scope a 'break' statement with the given label should
// jump to, or 0 if none applies. An empty name means an unlabeled break,
// which targets the innermost loop or switch.
LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
{
    // Reclaim free label scopes.
    //
    // The condition was previously coded as 'm_labelScopes.size() && !m_labelScopes.last().refCount()',
    // however sometimes this appears to lead to GCC going a little haywire and entering the loop with
    // size 0, leading to segfaulty badness. We are yet to identify a valid cause within our code to
    // cause the GCC codegen to misbehave in this fashion, and as such the following refactoring of the
    // loop condition is a workaround.
    while (m_labelScopes.size()) {
        if (m_labelScopes.last().refCount())
            break;
        m_labelScopes.removeLast();
    }

    if (!m_labelScopes.size())
        return 0;

    // We special-case the following, which is a syntax error in Firefox:
    // label:
    //     break;
    if (name.isEmpty()) {
        // Unlabeled break: find the innermost non-named-label scope
        // (a loop or switch).
        for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
            LabelScope* scope = &m_labelScopes[i];
            if (scope->type() != LabelScope::NamedLabel) {
                ASSERT(scope->breakTarget());
                return scope;
            }
        }
        return 0;
    }

    // Labeled break: find the innermost scope whose label matches.
    for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
        LabelScope* scope = &m_labelScopes[i];
        if (scope->name() && *scope->name() == name) {
            ASSERT(scope->breakTarget());
            return scope;
        }
    }
    return 0;
}
1665
continueTarget(const Identifier & name)1666 LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
1667 {
1668 // Reclaim free label scopes.
1669 while (m_labelScopes.size() && !m_labelScopes.last().refCount())
1670 m_labelScopes.removeLast();
1671
1672 if (!m_labelScopes.size())
1673 return 0;
1674
1675 if (name.isEmpty()) {
1676 for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
1677 LabelScope* scope = &m_labelScopes[i];
1678 if (scope->type() == LabelScope::Loop) {
1679 ASSERT(scope->continueTarget());
1680 return scope;
1681 }
1682 }
1683 return 0;
1684 }
1685
1686 // Continue to the loop nested nearest to the label scope that matches
1687 // 'name'.
1688 LabelScope* result = 0;
1689 for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
1690 LabelScope* scope = &m_labelScopes[i];
1691 if (scope->type() == LabelScope::Loop) {
1692 ASSERT(scope->continueTarget());
1693 result = scope;
1694 }
1695 if (scope->name() && *scope->name() == name)
1696 return result; // may be 0
1697 }
1698 return 0;
1699 }
1700
// Emits a jump to 'target' that must unwind through a mix of dynamic
// scopes and finally blocks (walking the context stack from topScope down
// to bottomScope). Runs of plain scopes are popped with op_jmp_scopes;
// each finally block is entered via a subroutine call so it executes
// before unwinding continues.
PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
{
    while (topScope > bottomScope) {
        // First we count the number of dynamic scopes we need to remove to get
        // to a finally block.
        int nNormalScopes = 0;
        while (topScope > bottomScope) {
            if (topScope->isFinallyBlock)
                break;
            ++nNormalScopes;
            --topScope;
        }

        if (nNormalScopes) {
            // We need to remove a number of dynamic scopes to get to the next
            // finally block
            emitOpcode(op_jmp_scopes);
            instructions().append(nNormalScopes);

            // If topScope == bottomScope then there isn't actually a finally block
            // left to emit, so make the jmp_scopes jump directly to the target label
            if (topScope == bottomScope) {
                instructions().append(target->offsetFrom(instructions().size()));
                return target;
            }

            // Otherwise we just use jmp_scopes to pop a group of scopes and go
            // to the next instruction
            RefPtr<Label> nextInsn = newLabel();
            instructions().append(nextInsn->offsetFrom(instructions().size()));
            emitLabel(nextInsn.get());
        }

        // Execute each consecutive finally block via a subroutine call before
        // continuing to unwind.
        while (topScope > bottomScope && topScope->isFinallyBlock) {
            emitJumpSubroutine(topScope->finallyContext.retAddrDst, topScope->finallyContext.finallyAddr);
            --topScope;
        }
    }
    // All scopes handled; emit the final jump to the target.
    return emitJump(target);
}
1741
emitJumpScopes(Label * target,int targetScopeDepth)1742 PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
1743 {
1744 ASSERT(scopeDepth() - targetScopeDepth >= 0);
1745 ASSERT(target->isForward());
1746
1747 size_t scopeDelta = scopeDepth() - targetScopeDepth;
1748 ASSERT(scopeDelta <= m_scopeContextStack.size());
1749 if (!scopeDelta)
1750 return emitJump(target);
1751
1752 if (m_finallyDepth)
1753 return emitComplexJumpScopes(target, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
1754
1755 emitOpcode(op_jmp_scopes);
1756 instructions().append(scopeDelta);
1757 instructions().append(target->offsetFrom(instructions().size()));
1758 return target;
1759 }
1760
// Emits an op_next_pname instruction with three operands: the destination
// register 'dst', the iterator register 'iter', and the relative jump offset
// to 'target'. Returns 'dst'.
// NOTE(review): presumably this drives for-in property enumeration, with the
// jump taken while names remain — confirm against the interpreter's
// op_next_pname implementation.
RegisterID* BytecodeGenerator::emitNextPropertyName(RegisterID* dst, RegisterID* iter, Label* target)
{
    emitOpcode(op_next_pname);
    instructions().append(dst->index());
    instructions().append(iter->index());
    instructions().append(target->offsetFrom(instructions().size()));
    return dst;
}
1769
// Registers an exception handler covering the bytecode range between 'start'
// and 'end', with its handler target at the current end of the instruction
// stream, then emits the op_catch instruction that begins the handler.
// The handler's scope depth is recorded as m_dynamicScopeDepth +
// m_baseScopeDepth. op_catch's single operand is 'targetRegister';
// presumably it receives the caught exception value — confirm in Interpreter.
RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
{
#if ENABLE(JIT)
    // The JIT build's HandlerInfo carries an extra CodeLocationLabel field,
    // left empty here.
    HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel() };
#else
    HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
#endif

    m_codeBlock->addExceptionHandler(info);
    emitOpcode(op_catch);
    instructions().append(targetRegister->index());
    return targetRegister;
}
1783
// Emits op_new_error with three operands: the destination register, the error
// type (cast to int), and the constant-pool index of the 'message' value.
// Returns 'dst', which holds the new error object (it is what callers such as
// emitThrowExpressionTooDeepException() subsequently throw).
RegisterID* BytecodeGenerator::emitNewError(RegisterID* dst, ErrorType type, JSValue message)
{
    emitOpcode(op_new_error);
    instructions().append(dst->index());
    instructions().append(static_cast<int>(type));
    instructions().append(addConstantValue(message)->index());
    return dst;
}
1792
// Emits op_jsr: jumps to the 'finally' label while recording the return
// address in 'retAddrDst', so the matching op_sret (see emitSubroutineReturn)
// can resume execution after this instruction.
PassRefPtr<Label> BytecodeGenerator::emitJumpSubroutine(RegisterID* retAddrDst, Label* finally)
{
    emitOpcode(op_jsr);
    instructions().append(retAddrDst->index());
    instructions().append(finally->offsetFrom(instructions().size()));
    emitLabel(newLabel().get()); // Record the fact that the next instruction is implicitly labeled, because op_sret will return to it.
    return finally;
}
1801
// Emits op_sret: returns from a finally-block subroutine using the return
// address stored in 'retAddrSrc' (written earlier by op_jsr).
void BytecodeGenerator::emitSubroutineReturn(RegisterID* retAddrSrc)
{
    emitOpcode(op_sret);
    instructions().append(retAddrSrc->index());
}
1807
// Emits op_push_new_scope with operands: destination register, the constant
// index of 'property', and the register holding 'value'. Also pushes a
// non-finally ControlFlowContext and bumps m_dynamicScopeDepth so subsequent
// scope bookkeeping (e.g. emitJumpScopes) accounts for the new dynamic scope.
void BytecodeGenerator::emitPushNewScope(RegisterID* dst, Identifier& property, RegisterID* value)
{
    // Mirror the runtime scope push on the compile-time context stack.
    ControlFlowContext context;
    context.isFinallyBlock = false;
    m_scopeContextStack.append(context);
    m_dynamicScopeDepth++;

    // NOTE(review): presumably the arguments object must be materialized
    // before a new dynamic scope is entered — confirm why this is needed here.
    createArgumentsIfNecessary();

    emitOpcode(op_push_new_scope);
    instructions().append(dst->index());
    instructions().append(addConstant(property));
    instructions().append(value->index());
}
1822
beginSwitch(RegisterID * scrutineeRegister,SwitchInfo::SwitchType type)1823 void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type)
1824 {
1825 SwitchInfo info = { instructions().size(), type };
1826 switch (type) {
1827 case SwitchInfo::SwitchImmediate:
1828 emitOpcode(op_switch_imm);
1829 break;
1830 case SwitchInfo::SwitchCharacter:
1831 emitOpcode(op_switch_char);
1832 break;
1833 case SwitchInfo::SwitchString:
1834 emitOpcode(op_switch_string);
1835 break;
1836 default:
1837 ASSERT_NOT_REACHED();
1838 }
1839
1840 instructions().append(0); // place holder for table index
1841 instructions().append(0); // place holder for default target
1842 instructions().append(scrutineeRegister->index());
1843 m_switchContextStack.append(info);
1844 }
1845
keyForImmediateSwitch(ExpressionNode * node,int32_t min,int32_t max)1846 static int32_t keyForImmediateSwitch(ExpressionNode* node, int32_t min, int32_t max)
1847 {
1848 UNUSED_PARAM(max);
1849 ASSERT(node->isNumber());
1850 double value = static_cast<NumberNode*>(node)->value();
1851 int32_t key = static_cast<int32_t>(value);
1852 ASSERT(key == value);
1853 ASSERT(key >= min);
1854 ASSERT(key <= max);
1855 return key - min;
1856 }
1857
prepareJumpTableForImmediateSwitch(SimpleJumpTable & jumpTable,int32_t switchAddress,uint32_t clauseCount,RefPtr<Label> * labels,ExpressionNode ** nodes,int32_t min,int32_t max)1858 static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
1859 {
1860 jumpTable.min = min;
1861 jumpTable.branchOffsets.resize(max - min + 1);
1862 jumpTable.branchOffsets.fill(0);
1863 for (uint32_t i = 0; i < clauseCount; ++i) {
1864 // We're emitting this after the clause labels should have been fixed, so
1865 // the labels should not be "forward" references
1866 ASSERT(!labels[i]->isForward());
1867 jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->offsetFrom(switchAddress));
1868 }
1869 }
1870
keyForCharacterSwitch(ExpressionNode * node,int32_t min,int32_t max)1871 static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
1872 {
1873 UNUSED_PARAM(max);
1874 ASSERT(node->isString());
1875 UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
1876 ASSERT(clause->size() == 1);
1877
1878 int32_t key = clause->data()[0];
1879 ASSERT(key >= min);
1880 ASSERT(key <= max);
1881 return key - min;
1882 }
1883
prepareJumpTableForCharacterSwitch(SimpleJumpTable & jumpTable,int32_t switchAddress,uint32_t clauseCount,RefPtr<Label> * labels,ExpressionNode ** nodes,int32_t min,int32_t max)1884 static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
1885 {
1886 jumpTable.min = min;
1887 jumpTable.branchOffsets.resize(max - min + 1);
1888 jumpTable.branchOffsets.fill(0);
1889 for (uint32_t i = 0; i < clauseCount; ++i) {
1890 // We're emitting this after the clause labels should have been fixed, so
1891 // the labels should not be "forward" references
1892 ASSERT(!labels[i]->isForward());
1893 jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->offsetFrom(switchAddress));
1894 }
1895 }
1896
prepareJumpTableForStringSwitch(StringJumpTable & jumpTable,int32_t switchAddress,uint32_t clauseCount,RefPtr<Label> * labels,ExpressionNode ** nodes)1897 static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
1898 {
1899 for (uint32_t i = 0; i < clauseCount; ++i) {
1900 // We're emitting this after the clause labels should have been fixed, so
1901 // the labels should not be "forward" references
1902 ASSERT(!labels[i]->isForward());
1903
1904 ASSERT(nodes[i]->isString());
1905 UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep();
1906 OffsetLocation location;
1907 location.branchOffset = labels[i]->offsetFrom(switchAddress);
1908 jumpTable.offsetTable.add(clause, location);
1909 }
1910 }
1911
endSwitch(uint32_t clauseCount,RefPtr<Label> * labels,ExpressionNode ** nodes,Label * defaultLabel,int32_t min,int32_t max)1912 void BytecodeGenerator::endSwitch(uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, Label* defaultLabel, int32_t min, int32_t max)
1913 {
1914 SwitchInfo switchInfo = m_switchContextStack.last();
1915 m_switchContextStack.removeLast();
1916 if (switchInfo.switchType == SwitchInfo::SwitchImmediate) {
1917 instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables();
1918 instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
1919
1920 SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable();
1921 prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes, min, max);
1922 } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) {
1923 instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables();
1924 instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
1925
1926 SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
1927 prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes, min, max);
1928 } else {
1929 ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
1930 instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
1931 instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
1932
1933 StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
1934 prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes);
1935 }
1936 }
1937
emitThrowExpressionTooDeepException()1938 RegisterID* BytecodeGenerator::emitThrowExpressionTooDeepException()
1939 {
1940 // It would be nice to do an even better job of identifying exactly where the expression is.
1941 // And we could make the caller pass the node pointer in, if there was some way of getting
1942 // that from an arbitrary node. However, calling emitExpressionInfo without any useful data
1943 // is still good enough to get us an accurate line number.
1944 emitExpressionInfo(0, 0, 0);
1945 RegisterID* exception = emitNewError(newTemporary(), SyntaxError, jsString(globalData(), "Expression too deep"));
1946 emitThrow(exception);
1947 return exception;
1948 }
1949
1950 } // namespace JSC
1951