1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "bootstrapper.h"
31 #include "codegen-inl.h"
32 #include "compiler.h"
33 #include "debug.h"
34 #include "ic-inl.h"
35 #include "jsregexp.h"
36 #include "parser.h"
37 #include "regexp-macro-assembler.h"
38 #include "regexp-stack.h"
39 #include "register-allocator-inl.h"
40 #include "runtime.h"
41 #include "scopes.h"
42
43 namespace v8 {
44 namespace internal {
45
46 #define __ ACCESS_MASM(masm_)
47
48 // -------------------------------------------------------------------------
49 // Platform-specific DeferredCode functions.
50
51 void DeferredCode::SaveRegisters() {
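  // A brief note on the encoding (inferred from the code below): registers_[i]
  // is either kIgnore, kPush, or an ebp-relative byte offset; the kSyncedFlag
  // bit marks offsets whose value is already stored in the frame.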
52 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
53 int action = registers_[i];
54 if (action == kPush) {
55 __ push(RegisterAllocator::ToRegister(i));
56 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
57 __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
58 }
59 }
60 }
61
62
63 void DeferredCode::RestoreRegisters() {
64 // Restore registers in reverse order due to the stack.
65 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
66 int action = registers_[i];
67 if (action == kPush) {
68 __ pop(RegisterAllocator::ToRegister(i));
69 } else if (action != kIgnore) {
70 action &= ~kSyncedFlag;
71 __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
72 }
73 }
74 }
75
76
77 // -------------------------------------------------------------------------
78 // CodeGenState implementation.
79
80 CodeGenState::CodeGenState(CodeGenerator* owner)
81 : owner_(owner),
82 destination_(NULL),
83 previous_(NULL) {
84 owner_->set_state(this);
85 }
86
87
88 CodeGenState::CodeGenState(CodeGenerator* owner,
89 ControlDestination* destination)
90 : owner_(owner),
91 destination_(destination),
92 previous_(owner->state()) {
93 owner_->set_state(this);
94 }
95
96
97 CodeGenState::~CodeGenState() {
98 ASSERT(owner_->state() == this);
99 owner_->set_state(previous_);
100 }
101
102
103 // -------------------------------------------------------------------------
104 // CodeGenerator implementation
105
106 CodeGenerator::CodeGenerator(MacroAssembler* masm)
107 : deferred_(8),
108 masm_(masm),
109 info_(NULL),
110 frame_(NULL),
111 allocator_(NULL),
112 state_(NULL),
113 loop_nesting_(0),
114 function_return_is_shadowed_(false),
115 in_spilled_code_(false) {
116 }
117
118
119 Scope* CodeGenerator::scope() { return info_->function()->scope(); }
120
121
122 // Calling conventions:
123 // ebp: caller's frame pointer
124 // esp: stack pointer
125 // edi: called JS function
126 // esi: callee's context
127
128 void CodeGenerator::Generate(CompilationInfo* info) {
129 // Record the position for debugging purposes.
130 CodeForFunctionPosition(info->function());
131
132 // Initialize state.
133 info_ = info;
134 ASSERT(allocator_ == NULL);
135 RegisterAllocator register_allocator(this);
136 allocator_ = &register_allocator;
137 ASSERT(frame_ == NULL);
138 frame_ = new VirtualFrame();
139 set_in_spilled_code(false);
140
141 // Adjust for function-level loop nesting.
142 loop_nesting_ += info->loop_nesting();
143
144 JumpTarget::set_compiling_deferred_code(false);
145
146 #ifdef DEBUG
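  // With --stop-at=<function name>, spill the frame and emit an int3
  // breakpoint so execution traps on entry to the named function.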
147 if (strlen(FLAG_stop_at) > 0 &&
148 info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
149 frame_->SpillAll();
150 __ int3();
151 }
152 #endif
153
154 // New scope to get automatic timing calculation.
155 { // NOLINT
156 HistogramTimerScope codegen_timer(&Counters::code_generation);
157 CodeGenState state(this);
158
159 // Entry:
160 // Stack: receiver, arguments, return address.
161 // ebp: caller's frame pointer
162 // esp: stack pointer
163 // edi: called JS function
164 // esi: callee's context
165 allocator_->Initialize();
166
167 if (info->mode() == CompilationInfo::PRIMARY) {
168 frame_->Enter();
169
170 // Allocate space for locals and initialize them.
171 frame_->AllocateStackSlots();
172
173 // Allocate the local context if needed.
174 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175 if (heap_slots > 0) {
176 Comment cmnt(masm_, "[ allocate local context");
177 // Allocate local context.
178 // Get outer context and create a new context based on it.
179 frame_->PushFunction();
180 Result context;
181 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
182 FastNewContextStub stub(heap_slots);
183 context = frame_->CallStub(&stub, 1);
184 } else {
185 context = frame_->CallRuntime(Runtime::kNewContext, 1);
186 }
187
188 // Update context local.
189 frame_->SaveContextRegister();
190
191 // Verify that the runtime call result and esi agree.
192 if (FLAG_debug_code) {
193 __ cmp(context.reg(), Operand(esi));
194 __ Assert(equal, "Runtime::NewContext should end up in esi");
195 }
196 }
197
198 // TODO(1241774): Improve this code:
199 // 1) only needed if we have a context
200 // 2) no need to recompute context ptr every single time
201 // 3) don't copy parameter operand code from SlotOperand!
202 {
203 Comment cmnt2(masm_, "[ copy context parameters into .context");
204 // Note that iteration order is relevant here! If we have the same
205 // parameter twice (e.g., function (x, y, x)), and that parameter
206 // needs to be copied into the context, it must be the last argument
207 // passed to the parameter that needs to be copied. This is a rare
208 // case so we don't check for it, instead we rely on the copying
209 // order: such a parameter is copied repeatedly into the same
210 // context location and thus the last value is what is seen inside
211 // the function.
212 for (int i = 0; i < scope()->num_parameters(); i++) {
213 Variable* par = scope()->parameter(i);
214 Slot* slot = par->slot();
215 if (slot != NULL && slot->type() == Slot::CONTEXT) {
216 // The use of SlotOperand below is safe in unspilled code
217 // because the slot is guaranteed to be a context slot.
218 //
219 // There are no parameters in the global scope.
220 ASSERT(!scope()->is_global_scope());
221 frame_->PushParameterAt(i);
222 Result value = frame_->Pop();
223 value.ToRegister();
224
225 // SlotOperand loads context.reg() with the context object
226 // stored to, used below in RecordWrite.
227 Result context = allocator_->Allocate();
228 ASSERT(context.is_valid());
229 __ mov(SlotOperand(slot, context.reg()), value.reg());
230 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
231 Result scratch = allocator_->Allocate();
232 ASSERT(scratch.is_valid());
233 frame_->Spill(context.reg());
234 frame_->Spill(value.reg());
235 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
236 }
237 }
238 }
239
240 // Store the arguments object. This must happen after context
241 // initialization because the arguments object may be stored in
242 // the context.
243 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
244 StoreArgumentsObject(true);
245 }
246
247 // Initialize ThisFunction reference if present.
248 if (scope()->is_function_scope() && scope()->function() != NULL) {
249 frame_->Push(Factory::the_hole_value());
250 StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
251 }
252 } else {
253 // When used as the secondary compiler for splitting, ebp, esi,
254 // and edi have been pushed on the stack. Adjust the virtual
255 // frame to match this state.
256 frame_->Adjust(3);
257 allocator_->Unuse(edi);
258
259 // Bind all the bailout labels to the beginning of the function.
260 List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
261 for (int i = 0; i < bailouts->length(); i++) {
262 __ bind(bailouts->at(i)->label());
263 }
264 }
265
266 // Initialize the function return target after the locals are set
267 // up, because it needs the expected frame height from the frame.
268 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
269 function_return_is_shadowed_ = false;
270
271 // Generate code to 'execute' declarations and initialize functions
272 // (source elements). In case of an illegal redeclaration we need to
273 // handle that instead of processing the declarations.
274 if (scope()->HasIllegalRedeclaration()) {
275 Comment cmnt(masm_, "[ illegal redeclarations");
276 scope()->VisitIllegalRedeclaration(this);
277 } else {
278 Comment cmnt(masm_, "[ declarations");
279 ProcessDeclarations(scope()->declarations());
280 // Bail out if a stack-overflow exception occurred when processing
281 // declarations.
282 if (HasStackOverflow()) return;
283 }
284
285 if (FLAG_trace) {
286 frame_->CallRuntime(Runtime::kTraceEnter, 0);
287 // Ignore the return value.
288 }
289 CheckStack();
290
291 // Compile the body of the function in a vanilla state. Don't
292 // bother compiling all the code if the scope has an illegal
293 // redeclaration.
294 if (!scope()->HasIllegalRedeclaration()) {
295 Comment cmnt(masm_, "[ function body");
296 #ifdef DEBUG
297 bool is_builtin = Bootstrapper::IsActive();
298 bool should_trace =
299 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
300 if (should_trace) {
301 frame_->CallRuntime(Runtime::kDebugTrace, 0);
302 // Ignore the return value.
303 }
304 #endif
305 VisitStatements(info->function()->body());
306
307 // Handle the return from the function.
308 if (has_valid_frame()) {
309 // If there is a valid frame, control flow can fall off the end of
310 // the body. In that case there is an implicit return statement.
311 ASSERT(!function_return_is_shadowed_);
312 CodeForReturnPosition(info->function());
313 frame_->PrepareForReturn();
314 Result undefined(Factory::undefined_value());
315 if (function_return_.is_bound()) {
316 function_return_.Jump(&undefined);
317 } else {
318 function_return_.Bind(&undefined);
319 GenerateReturnSequence(&undefined);
320 }
321 } else if (function_return_.is_linked()) {
322 // If the return target has dangling jumps to it, then we have not
323 // yet generated the return sequence. This can happen when (a)
324 // control does not flow off the end of the body so we did not
325 // compile an artificial return statement just above, and (b) there
326 // are return statements in the body but (c) they are all shadowed.
327 Result return_value;
328 function_return_.Bind(&return_value);
329 GenerateReturnSequence(&return_value);
330 }
331 }
332 }
333
334 // Adjust for function-level loop nesting.
335 loop_nesting_ -= info->loop_nesting();
336
337 // Code generation state must be reset.
338 ASSERT(state_ == NULL);
339 ASSERT(loop_nesting() == 0);
340 ASSERT(!function_return_is_shadowed_);
341 function_return_.Unuse();
342 DeleteFrame();
343
344 // Process any deferred code using the register allocator.
345 if (!HasStackOverflow()) {
346 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
347 JumpTarget::set_compiling_deferred_code(true);
348 ProcessDeferred();
349 JumpTarget::set_compiling_deferred_code(false);
350 }
351
352 // There is no need to delete the register allocator, it is a
353 // stack-allocated local.
354 allocator_ = NULL;
355 }
356
357
358 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
359 // Currently, this assertion will fail if we try to assign to
360 // a constant variable that is constant because it is read-only
361 // (such as the variable referring to a named function expression).
362 // We need to implement assignments to read-only variables.
363 // Ideally, we should do this during AST generation (by converting
364 // such assignments into expression statements); however, in general
365 // we may not be able to make the decision until past AST generation,
366 // that is when the entire program is known.
367 ASSERT(slot != NULL);
368 int index = slot->index();
369 switch (slot->type()) {
370 case Slot::PARAMETER:
371 return frame_->ParameterAt(index);
372
373 case Slot::LOCAL:
374 return frame_->LocalAt(index);
375
376 case Slot::CONTEXT: {
377 // Follow the context chain if necessary.
378 ASSERT(!tmp.is(esi)); // do not overwrite context register
379 Register context = esi;
380 int chain_length = scope()->ContextChainLength(slot->var()->scope());
381 for (int i = 0; i < chain_length; i++) {
382 // Load the closure.
383 // (All contexts, even 'with' contexts, have a closure,
384 // and it is the same for all contexts inside a function.
385 // There is no need to go to the function context first.)
386 __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
387 // Load the function context (which is the incoming, outer context).
388 __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
389 context = tmp;
390 }
391 // We may have a 'with' context now. Get the function context.
392 // (In fact this mov may never be needed, since the scope analysis
393 // may not permit a direct context access in this case and thus we are
394 // always at a function context. However it is safe to dereference be-
395 // cause the function context of a function context is itself. Before
396 // deleting this mov we should try to create a counter-example first,
397 // though...)
398 __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
399 return ContextOperand(tmp, index);
400 }
401
402 default:
403 UNREACHABLE();
404 return Operand(eax);
405 }
406 }
407
408
409 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
410 Result tmp,
411 JumpTarget* slow) {
412 ASSERT(slot->type() == Slot::CONTEXT);
413 ASSERT(tmp.is_register());
414 Register context = esi;
415
416 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
417 if (s->num_heap_slots() > 0) {
418 if (s->calls_eval()) {
419 // Check that extension is NULL.
420 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
421 Immediate(0));
422 slow->Branch(not_equal, not_taken);
423 }
424 __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
425 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
426 context = tmp.reg();
427 }
428 }
429 // Check that last extension is NULL.
430 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
431 slow->Branch(not_equal, not_taken);
432 __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
433 return ContextOperand(tmp.reg(), slot->index());
434 }
435
436
437 // Emit code to load the value of an expression to the top of the
438 // frame. If the expression is boolean-valued it may be compiled (or
439 // partially compiled) into control flow to the control destination.
440 // If force_control is true, control flow is forced.
441 void CodeGenerator::LoadCondition(Expression* x,
442 ControlDestination* dest,
443 bool force_control) {
444 ASSERT(!in_spilled_code());
445 int original_height = frame_->height();
446
447 { CodeGenState new_state(this, dest);
448 Visit(x);
449
450 // If we hit a stack overflow, we may not have actually visited
451 // the expression. In that case, we ensure that we have a
452 // valid-looking frame state because we will continue to generate
453 // code as we unwind the C++ stack.
454 //
455 // It's possible to have both a stack overflow and a valid frame
456 // state (eg, a subexpression overflowed, visiting it returned
457 // with a dummied frame state, and visiting this expression
458 // returned with a normal-looking state).
459 if (HasStackOverflow() &&
460 !dest->is_used() &&
461 frame_->height() == original_height) {
462 dest->Goto(true);
463 }
464 }
465
466 if (force_control && !dest->is_used()) {
467 // Convert the TOS value into flow to the control destination.
468 ToBoolean(dest);
469 }
470
471 ASSERT(!(force_control && !dest->is_used()));
472 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
473 }
474
475
476 void CodeGenerator::LoadAndSpill(Expression* expression) {
477 ASSERT(in_spilled_code());
478 set_in_spilled_code(false);
479 Load(expression);
480 frame_->SpillAll();
481 set_in_spilled_code(true);
482 }
483
484
485 void CodeGenerator::Load(Expression* expr) {
486 #ifdef DEBUG
487 int original_height = frame_->height();
488 #endif
489 ASSERT(!in_spilled_code());
490 JumpTarget true_target;
491 JumpTarget false_target;
492 ControlDestination dest(&true_target, &false_target, true);
493 LoadCondition(expr, &dest, false);
494
495 if (dest.false_was_fall_through()) {
496 // The false target was just bound.
497 JumpTarget loaded;
498 frame_->Push(Factory::false_value());
499 // There may be dangling jumps to the true target.
500 if (true_target.is_linked()) {
501 loaded.Jump();
502 true_target.Bind();
503 frame_->Push(Factory::true_value());
504 loaded.Bind();
505 }
506
507 } else if (dest.is_used()) {
508 // There is true, and possibly false, control flow (with true as
509 // the fall through).
510 JumpTarget loaded;
511 frame_->Push(Factory::true_value());
512 if (false_target.is_linked()) {
513 loaded.Jump();
514 false_target.Bind();
515 frame_->Push(Factory::false_value());
516 loaded.Bind();
517 }
518
519 } else {
520 // We have a valid value on top of the frame, but we still may
521 // have dangling jumps to the true and false targets from nested
522 // subexpressions (eg, the left subexpressions of the
523 // short-circuited boolean operators).
524 ASSERT(has_valid_frame());
525 if (true_target.is_linked() || false_target.is_linked()) {
526 JumpTarget loaded;
527 loaded.Jump(); // Don't lose the current TOS.
528 if (true_target.is_linked()) {
529 true_target.Bind();
530 frame_->Push(Factory::true_value());
531 if (false_target.is_linked()) {
532 loaded.Jump();
533 }
534 }
535 if (false_target.is_linked()) {
536 false_target.Bind();
537 frame_->Push(Factory::false_value());
538 }
539 loaded.Bind();
540 }
541 }
542
543 ASSERT(has_valid_frame());
544 ASSERT(frame_->height() == original_height + 1);
545 }
546
547
548 void CodeGenerator::LoadGlobal() {
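  // Push the global object: in spilled code it is pushed directly onto the
  // stack, otherwise it goes through a freshly allocated register so the
  // virtual frame can track it.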
549 if (in_spilled_code()) {
550 frame_->EmitPush(GlobalObject());
551 } else {
552 Result temp = allocator_->Allocate();
553 __ mov(temp.reg(), GlobalObject());
554 frame_->Push(&temp);
555 }
556 }
557
558
559 void CodeGenerator::LoadGlobalReceiver() {
560 Result temp = allocator_->Allocate();
561 Register reg = temp.reg();
562 __ mov(reg, GlobalObject());
563 __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
564 frame_->Push(&temp);
565 }
566
567
568 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
569 // Special handling of identifiers as subexpressions of typeof.
570 Variable* variable = expr->AsVariableProxy()->AsVariable();
571 if (variable != NULL && !variable->is_this() && variable->is_global()) {
572 // For a global variable we build the property reference
573 // <global>.<variable> and perform a (regular non-contextual) property
574 // load to make sure we do not get reference errors.
575 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
576 Literal key(variable->name());
577 Property property(&global, &key, RelocInfo::kNoPosition);
578 Reference ref(this, &property);
579 ref.GetValue();
580 } else if (variable != NULL && variable->slot() != NULL) {
581 // For a variable that rewrites to a slot, we signal it is the immediate
582 // subexpression of a typeof.
583 Result result =
584 LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
585 frame()->Push(&result);
586 } else {
587 // Anything else can be handled normally.
588 Load(expr);
589 }
590 }
591
592
593 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
594 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
595 ASSERT(scope()->arguments_shadow() != NULL);
596 // We don't want to do lazy arguments allocation for functions that
597 // have heap-allocated contexts, because it interferes with the
598 // uninitialized const tracking in the context objects.
599 return (scope()->num_heap_slots() > 0)
600 ? EAGER_ARGUMENTS_ALLOCATION
601 : LAZY_ARGUMENTS_ALLOCATION;
602 }
603
604
605 Result CodeGenerator::StoreArgumentsObject(bool initial) {
606 ArgumentsAllocationMode mode = ArgumentsMode();
607 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
608
609 Comment cmnt(masm_, "[ store arguments object");
610 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
611 // When using lazy arguments allocation, we store the hole value
612 // as a sentinel indicating that the arguments object hasn't been
613 // allocated yet.
614 frame_->Push(Factory::the_hole_value());
615 } else {
616 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
617 frame_->PushFunction();
618 frame_->PushReceiverSlotAddress();
619 frame_->Push(Smi::FromInt(scope()->num_parameters()));
620 Result result = frame_->CallStub(&stub, 3);
621 frame_->Push(&result);
622 }
623
624 Variable* arguments = scope()->arguments()->var();
625 Variable* shadow = scope()->arguments_shadow()->var();
626 ASSERT(arguments != NULL && arguments->slot() != NULL);
627 ASSERT(shadow != NULL && shadow->slot() != NULL);
628 JumpTarget done;
629 bool skip_arguments = false;
630 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
631 // We have to skip storing into the arguments slot if it has already
632 // been written to. This can happen if a function has a local
633 // variable named 'arguments'.
634 Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
635 if (probe.is_constant()) {
636 // We have to skip updating the arguments object if it has
637 // been assigned a proper value.
638 skip_arguments = !probe.handle()->IsTheHole();
639 } else {
640 __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
641 probe.Unuse();
642 done.Branch(not_equal);
643 }
644 }
645 if (!skip_arguments) {
646 StoreToSlot(arguments->slot(), NOT_CONST_INIT);
647 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
648 }
649 StoreToSlot(shadow->slot(), NOT_CONST_INIT);
650 return frame_->Pop();
651 }
652
653 // -------------------------------------------------------------------------
654 // CodeGenerator implementation of variables, lookups, and stores.
655
656 Reference::Reference(CodeGenerator* cgen,
657 Expression* expression,
658 bool persist_after_get)
659 : cgen_(cgen),
660 expression_(expression),
661 type_(ILLEGAL),
662 persist_after_get_(persist_after_get) {
663 cgen->LoadReference(this);
664 }
665
666
667 Reference::~Reference() {
668 ASSERT(is_unloaded() || is_illegal());
669 }
670
671
672 void CodeGenerator::LoadReference(Reference* ref) {
673 // References are loaded from both spilled and unspilled code. Set the
674 // state to unspilled to allow that (and explicitly spill after
675 // construction at the construction sites).
676 bool was_in_spilled_code = in_spilled_code_;
677 in_spilled_code_ = false;
678
679 Comment cmnt(masm_, "[ LoadReference");
680 Expression* e = ref->expression();
681 Property* property = e->AsProperty();
682 Variable* var = e->AsVariableProxy()->AsVariable();
683
684 if (property != NULL) {
685 // The expression is either a property or a variable proxy that rewrites
686 // to a property.
687 Load(property->obj());
688 if (property->key()->IsPropertyName()) {
689 ref->set_type(Reference::NAMED);
690 } else {
691 Load(property->key());
692 ref->set_type(Reference::KEYED);
693 }
694 } else if (var != NULL) {
695 // The expression is a variable proxy that does not rewrite to a
696 // property. Global variables are treated as named property references.
697 if (var->is_global()) {
698 // If eax is free, the register allocator prefers it. Thus the code
699 // generator will load the global object into eax, which is where
700 // LoadIC wants it. Most uses of Reference call LoadIC directly
701 // after the reference is created.
702 frame_->Spill(eax);
703 LoadGlobal();
704 ref->set_type(Reference::NAMED);
705 } else {
706 ASSERT(var->slot() != NULL);
707 ref->set_type(Reference::SLOT);
708 }
709 } else {
710 // Anything else is a runtime error.
711 Load(e);
712 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
713 }
714
715 in_spilled_code_ = was_in_spilled_code;
716 }
717
718
719 void CodeGenerator::UnloadReference(Reference* ref) {
720 // Pop a reference from the stack while preserving TOS.
721 Comment cmnt(masm_, "[ UnloadReference");
722 frame_->Nip(ref->size());
723 ref->set_unloaded();
724 }
725
726
727 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
728 // convert it to a boolean in the condition code register or jump to
729 // 'false_target'/'true_target' as appropriate.
730 void CodeGenerator::ToBoolean(ControlDestination* dest) {
731 Comment cmnt(masm_, "[ ToBoolean");
732
733 // The value to convert should be popped from the frame.
734 Result value = frame_->Pop();
735 value.ToRegister();
736
737 if (value.is_number()) {
738 Comment cmnt(masm_, "ONLY_NUMBER");
739 // Fast case if NumberInfo indicates only numbers.
740 if (FLAG_debug_code) {
741 __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
742 }
743 // Smi => false iff zero.
744 ASSERT(kSmiTag == 0);
745 __ test(value.reg(), Operand(value.reg()));
746 dest->false_target()->Branch(zero);
747 __ test(value.reg(), Immediate(kSmiTagMask));
748 dest->true_target()->Branch(zero);
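    // The value is a heap number; compare its double value against zero.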
749 __ fldz();
750 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
751 __ FCmp();
752 value.Unuse();
753 dest->Split(not_zero);
754 } else {
755 // Fast case checks.
756 // 'false' => false.
757 __ cmp(value.reg(), Factory::false_value());
758 dest->false_target()->Branch(equal);
759
760 // 'true' => true.
761 __ cmp(value.reg(), Factory::true_value());
762 dest->true_target()->Branch(equal);
763
764 // 'undefined' => false.
765 __ cmp(value.reg(), Factory::undefined_value());
766 dest->false_target()->Branch(equal);
767
768 // Smi => false iff zero.
769 ASSERT(kSmiTag == 0);
770 __ test(value.reg(), Operand(value.reg()));
771 dest->false_target()->Branch(zero);
772 __ test(value.reg(), Immediate(kSmiTagMask));
773 dest->true_target()->Branch(zero);
774
775 // Call the stub for all other cases.
776 frame_->Push(&value); // Undo the Pop() from above.
777 ToBooleanStub stub;
778 Result temp = frame_->CallStub(&stub, 1);
779 // Convert the result to a condition code.
780 __ test(temp.reg(), Operand(temp.reg()));
781 temp.Unuse();
782 dest->Split(not_equal);
783 }
784 }
785
786
787 class FloatingPointHelper : public AllStatic {
788 public:
789
790 enum ArgLocation {
791 ARGS_ON_STACK,
792 ARGS_IN_REGISTERS
793 };
794
795 // Code pattern for loading a floating point value. Input value must
796 // be either a smi or a heap number object (fp value). Requirements:
797 // operand in register number. Returns operand as floating point number
798 // on FPU stack.
799 static void LoadFloatOperand(MacroAssembler* masm, Register number);
800 // Code pattern for loading floating point values. Input values must
801 // be either smi or heap number objects (fp values). Requirements:
802 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
803 // Returns operands as floating point numbers on FPU stack.
804 static void LoadFloatOperands(MacroAssembler* masm,
805 Register scratch,
806 ArgLocation arg_location = ARGS_ON_STACK);
807
808 // Similar to LoadFloatOperand but assumes that both operands are smis.
809 // Expects operands in edx, eax.
810 static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
811
812 // Test if operands are smi or number objects (fp). Requirements:
813 // operand_1 in eax, operand_2 in edx; falls through on float
814 // operands, jumps to the non_float label otherwise.
815 static void CheckFloatOperands(MacroAssembler* masm,
816 Label* non_float,
817 Register scratch);
818 // Takes the operands in edx and eax and loads them as integers in eax
819 // and ecx.
820 static void LoadAsIntegers(MacroAssembler* masm,
821 bool use_sse3,
822 Label* operand_conversion_failure);
823 // Test if operands are smis or heap numbers and load them
824 // into xmm0 and xmm1 if they are. Operands are in edx and eax.
825 // Leaves operands unchanged.
826 static void LoadSSE2Operands(MacroAssembler* masm);
827 // Test if operands are numbers (smi or HeapNumber objects), and load
828 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
829 // either operand is not a number. Operands are in edx and eax.
830 // Leaves operands unchanged.
831 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
832
833 // Similar to LoadSSE2Operands but assumes that both operands are smis.
834 // Expects operands in edx, eax.
835 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
836 };
837
838
839 const char* GenericBinaryOpStub::GetName() {
840 if (name_ != NULL) return name_;
841 const int kMaxNameLength = 100;
842 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
843 if (name_ == NULL) return "OOM";
844 const char* op_name = Token::Name(op_);
845 const char* overwrite_name;
846 switch (mode_) {
847 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
848 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
849 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
850 default: overwrite_name = "UnknownOverwrite"; break;
851 }
852
853 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
854 "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
855 op_name,
856 overwrite_name,
857 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
858 args_in_registers_ ? "RegArgs" : "StackArgs",
859 args_reversed_ ? "_R" : "",
860 NumberInfo::ToString(operands_type_));
861 return name_;
862 }
863
864
865 // Call the specialized stub for a binary operation.
866 class DeferredInlineBinaryOperation: public DeferredCode {
867 public:
868 DeferredInlineBinaryOperation(Token::Value op,
869 Register dst,
870 Register left,
871 Register right,
872 OverwriteMode mode)
873 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
874 set_comment("[ DeferredInlineBinaryOperation");
875 }
876
877 virtual void Generate();
878
879 private:
880 Token::Value op_;
881 Register dst_;
882 Register left_;
883 Register right_;
884 OverwriteMode mode_;
885 };
886
887
888 void DeferredInlineBinaryOperation::Generate() {
889 Label done;
890 if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
891 (op_ == Token::SUB) ||
892 (op_ == Token::MUL) ||
893 (op_ == Token::DIV))) {
894 CpuFeatures::Scope use_sse2(SSE2);
895 Label call_runtime, after_alloc_failure;
896 Label left_smi, right_smi, load_right, do_op;
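    // Load the left operand into xmm0: heap numbers are loaded directly,
    // smis are untagged and converted in the left_smi branch below.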
897 __ test(left_, Immediate(kSmiTagMask));
898 __ j(zero, &left_smi);
899 __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
900 Factory::heap_number_map());
901 __ j(not_equal, &call_runtime);
902 __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
903 if (mode_ == OVERWRITE_LEFT) {
904 __ mov(dst_, left_);
905 }
906 __ jmp(&load_right);
907
908 __ bind(&left_smi);
909 __ SmiUntag(left_);
910 __ cvtsi2sd(xmm0, Operand(left_));
911 __ SmiTag(left_);
912 if (mode_ == OVERWRITE_LEFT) {
913 Label alloc_failure;
914 __ push(left_);
915 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
916 __ pop(left_);
917 }
918
919 __ bind(&load_right);
920 __ test(right_, Immediate(kSmiTagMask));
921 __ j(zero, &right_smi);
922 __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
923 Factory::heap_number_map());
924 __ j(not_equal, &call_runtime);
925 __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
926 if (mode_ == OVERWRITE_RIGHT) {
927 __ mov(dst_, right_);
928 } else if (mode_ == NO_OVERWRITE) {
929 Label alloc_failure;
930 __ push(left_);
931 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
932 __ pop(left_);
933 }
934 __ jmp(&do_op);
935
936 __ bind(&right_smi);
937 __ SmiUntag(right_);
938 __ cvtsi2sd(xmm1, Operand(right_));
939 __ SmiTag(right_);
940 if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
941 Label alloc_failure;
942 __ push(left_);
943 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
944 __ pop(left_);
945 }
946
947 __ bind(&do_op);
948 switch (op_) {
949 case Token::ADD: __ addsd(xmm0, xmm1); break;
950 case Token::SUB: __ subsd(xmm0, xmm1); break;
951 case Token::MUL: __ mulsd(xmm0, xmm1); break;
952 case Token::DIV: __ divsd(xmm0, xmm1); break;
953 default: UNREACHABLE();
954 }
955 __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
956 __ jmp(&done);
957
958 __ bind(&after_alloc_failure);
959 __ pop(left_);
960 __ bind(&call_runtime);
961 }
962 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
963 stub.GenerateCall(masm_, left_, right_);
964 if (!dst_.is(eax)) __ mov(dst_, eax);
965 __ bind(&done);
966 }
967
968
969 void CodeGenerator::GenericBinaryOperation(Token::Value op,
970 StaticType* type,
971 OverwriteMode overwrite_mode) {
972 Comment cmnt(masm_, "[ BinaryOperation");
973 Comment cmnt_token(masm_, Token::String(op));
974
975 if (op == Token::COMMA) {
976 // Simply discard left value.
977 frame_->Nip(1);
978 return;
979 }
980
981 Result right = frame_->Pop();
982 Result left = frame_->Pop();
983
984 if (op == Token::ADD) {
985 bool left_is_string = left.is_constant() && left.handle()->IsString();
986 bool right_is_string = right.is_constant() && right.handle()->IsString();
987 if (left_is_string || right_is_string) {
988 frame_->Push(&left);
989 frame_->Push(&right);
990 Result answer;
991 if (left_is_string) {
992 if (right_is_string) {
993 // TODO(lrn): if both are constant strings
994 // -- do a compile time cons, if allocation during codegen is allowed.
995 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
996 } else {
997 answer =
998 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
999 }
1000 } else if (right_is_string) {
1001 answer =
1002 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
1003 }
1004 frame_->Push(&answer);
1005 return;
1006 }
1007 // Neither operand is known to be a string.
1008 }
1009
1010 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
1011 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
1012 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
1013 bool right_is_non_smi_constant =
1014 right.is_constant() && !right.handle()->IsSmi();
1015
1016 if (left_is_smi_constant && right_is_smi_constant) {
1017 // Compute the constant result at compile time, and leave it on the frame.
1018 int left_int = Smi::cast(*left.handle())->value();
1019 int right_int = Smi::cast(*right.handle())->value();
1020 if (FoldConstantSmis(op, left_int, right_int)) return;
1021 }
1022
1023 // Get number type of left and right sub-expressions.
1024 NumberInfo::Type operands_type =
1025 NumberInfo::Combine(left.number_info(), right.number_info());
1026
1027 Result answer;
1028 if (left_is_non_smi_constant || right_is_non_smi_constant) {
1029 // Go straight to the slow case, with no smi code.
1030 GenericBinaryOpStub stub(op,
1031 overwrite_mode,
1032 NO_SMI_CODE_IN_STUB,
1033 operands_type);
1034 answer = stub.GenerateCall(masm_, frame_, &left, &right);
1035 } else if (right_is_smi_constant) {
1036 answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
1037 type, false, overwrite_mode);
1038 } else if (left_is_smi_constant) {
1039 answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
1040 type, true, overwrite_mode);
1041 } else {
1042 // Set the flags based on the operation, type and loop nesting level.
1043 // Bit operations always assume they likely operate on Smis. Still only
1044 // generate the inline Smi check code if this operation is part of a loop.
1045 // For all other operations only inline the Smi check code for likely smis
1046 // if the operation is part of a loop.
1047 if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
1048 answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
1049 } else {
1050 GenericBinaryOpStub stub(op,
1051 overwrite_mode,
1052 NO_GENERIC_BINARY_FLAGS,
1053 operands_type);
1054 answer = stub.GenerateCall(masm_, frame_, &left, &right);
1055 }
1056 }
1057
1058 // Set NumberInfo of result according to the operation performed.
1059 // Rely on the fact that smis have a 31 bit payload on ia32.
1060 ASSERT(kSmiValueSize == 31);
1061 NumberInfo::Type result_type = NumberInfo::kUnknown;
1062 switch (op) {
1063 case Token::COMMA:
1064 result_type = right.number_info();
1065 break;
1066 case Token::OR:
1067 case Token::AND:
1068 // Result type can be either of the two input types.
1069 result_type = operands_type;
1070 break;
1071 case Token::BIT_OR:
1072 case Token::BIT_XOR:
1073 case Token::BIT_AND:
1074 // Result is always a number. Smi property of inputs is preserved.
1075 result_type = (operands_type == NumberInfo::kSmi)
1076 ? NumberInfo::kSmi
1077 : NumberInfo::kNumber;
1078 break;
1079 case Token::SAR:
1080 // Result is a smi if we shift by a constant >= 1, otherwise a number.
1081 result_type = (right.is_constant() && right.handle()->IsSmi()
1082 && Smi::cast(*right.handle())->value() >= 1)
1083 ? NumberInfo::kSmi
1084 : NumberInfo::kNumber;
1085 break;
1086 case Token::SHR:
1087 // Result is a smi if we shift by a constant >= 2, otherwise a number.
1088 result_type = (right.is_constant() && right.handle()->IsSmi()
1089 && Smi::cast(*right.handle())->value() >= 2)
1090 ? NumberInfo::kSmi
1091 : NumberInfo::kNumber;
1092 break;
1093 case Token::ADD:
1094 // Result could be a string or a number. Check types of inputs.
1095 result_type = NumberInfo::IsNumber(operands_type)
1096 ? NumberInfo::kNumber
1097 : NumberInfo::kUnknown;
1098 break;
1099 case Token::SHL:
1100 case Token::SUB:
1101 case Token::MUL:
1102 case Token::DIV:
1103 case Token::MOD:
1104 // Result is always a number.
1105 result_type = NumberInfo::kNumber;
1106 break;
1107 default:
1108 UNREACHABLE();
1109 }
1110 answer.set_number_info(result_type);
1111 frame_->Push(&answer);
1112 }
1113
1114
1115 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
1116 Object* answer_object = Heap::undefined_value();
1117 switch (op) {
1118 case Token::ADD:
1119 if (Smi::IsValid(left + right)) {
1120 answer_object = Smi::FromInt(left + right);
1121 }
1122 break;
1123 case Token::SUB:
1124 if (Smi::IsValid(left - right)) {
1125 answer_object = Smi::FromInt(left - right);
1126 }
1127 break;
1128 case Token::MUL: {
1129 double answer = static_cast<double>(left) * right;
1130 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
1131 // If the product is zero and the non-zero factor is negative,
1132 // the spec requires us to return floating point negative zero.
1133 if (answer != 0 || (left >= 0 && right >= 0)) {
1134 answer_object = Smi::FromInt(static_cast<int>(answer));
1135 }
1136 }
1137 }
1138 break;
1139 case Token::DIV:
1140 case Token::MOD:
1141 break;
1142 case Token::BIT_OR:
1143 answer_object = Smi::FromInt(left | right);
1144 break;
1145 case Token::BIT_AND:
1146 answer_object = Smi::FromInt(left & right);
1147 break;
1148 case Token::BIT_XOR:
1149 answer_object = Smi::FromInt(left ^ right);
1150 break;
1151
1152 case Token::SHL: {
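      // ECMAScript shift counts use only the five least significant bits.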
1153 int shift_amount = right & 0x1F;
1154 if (Smi::IsValid(left << shift_amount)) {
1155 answer_object = Smi::FromInt(left << shift_amount);
1156 }
1157 break;
1158 }
1159 case Token::SHR: {
1160 int shift_amount = right & 0x1F;
1161 unsigned int unsigned_left = left;
1162 unsigned_left >>= shift_amount;
1163 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
1164 answer_object = Smi::FromInt(unsigned_left);
1165 }
1166 break;
1167 }
1168 case Token::SAR: {
1169 int shift_amount = right & 0x1F;
1170 unsigned int unsigned_left = left;
1171 if (left < 0) {
1172 // Perform arithmetic shift of a negative number by
1173 // complementing number, logical shifting, complementing again.
1174 unsigned_left = ~unsigned_left;
1175 unsigned_left >>= shift_amount;
1176 unsigned_left = ~unsigned_left;
1177 } else {
1178 unsigned_left >>= shift_amount;
1179 }
1180 ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
1181 answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
1182 break;
1183 }
1184 default:
1185 UNREACHABLE();
1186 break;
1187 }
1188 if (answer_object == Heap::undefined_value()) {
1189 return false;
1190 }
1191 frame_->Push(Handle<Object>(answer_object));
1192 return true;
1193 }
1194
1195
1196 // Implements a binary operation using a deferred code object and some
1197 // inline code to operate on smis quickly.
1198 Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
1199 Result* left,
1200 Result* right,
1201 OverwriteMode overwrite_mode) {
1202 Result answer;
1203 // Special handling of div and mod because they use fixed registers.
1204 if (op == Token::DIV || op == Token::MOD) {
1205 // We need eax as the quotient register, edx as the remainder
1206 // register, neither left nor right in eax or edx, and left copied
1207 // to eax.
1208 Result quotient;
1209 Result remainder;
1210 bool left_is_in_eax = false;
1211 // Step 1: get eax for quotient.
1212 if ((left->is_register() && left->reg().is(eax)) ||
1213 (right->is_register() && right->reg().is(eax))) {
1214 // One or both is in eax. Use a fresh non-edx register for
1215 // them.
1216 Result fresh = allocator_->Allocate();
1217 ASSERT(fresh.is_valid());
1218 if (fresh.reg().is(edx)) {
1219 remainder = fresh;
1220 fresh = allocator_->Allocate();
1221 ASSERT(fresh.is_valid());
1222 }
1223 if (left->is_register() && left->reg().is(eax)) {
1224 quotient = *left;
1225 *left = fresh;
1226 left_is_in_eax = true;
1227 }
1228 if (right->is_register() && right->reg().is(eax)) {
1229 quotient = *right;
1230 *right = fresh;
1231 }
1232 __ mov(fresh.reg(), eax);
1233 } else {
1234 // Neither left nor right is in eax.
1235 quotient = allocator_->Allocate(eax);
1236 }
1237 ASSERT(quotient.is_register() && quotient.reg().is(eax));
1238 ASSERT(!(left->is_register() && left->reg().is(eax)));
1239 ASSERT(!(right->is_register() && right->reg().is(eax)));
1240
1241 // Step 2: get edx for remainder if necessary.
1242 if (!remainder.is_valid()) {
1243 if ((left->is_register() && left->reg().is(edx)) ||
1244 (right->is_register() && right->reg().is(edx))) {
1245 Result fresh = allocator_->Allocate();
1246 ASSERT(fresh.is_valid());
1247 if (left->is_register() && left->reg().is(edx)) {
1248 remainder = *left;
1249 *left = fresh;
1250 }
1251 if (right->is_register() && right->reg().is(edx)) {
1252 remainder = *right;
1253 *right = fresh;
1254 }
1255 __ mov(fresh.reg(), edx);
1256 } else {
1257 // Neither left nor right is in edx.
1258 remainder = allocator_->Allocate(edx);
1259 }
1260 }
1261 ASSERT(remainder.is_register() && remainder.reg().is(edx));
1262 ASSERT(!(left->is_register() && left->reg().is(edx)));
1263 ASSERT(!(right->is_register() && right->reg().is(edx)));
1264
1265 left->ToRegister();
1266 right->ToRegister();
1267 frame_->Spill(eax);
1268 frame_->Spill(edx);
1269
1270 // Check that left and right are smi tagged.
1271 DeferredInlineBinaryOperation* deferred =
1272 new DeferredInlineBinaryOperation(op,
1273 (op == Token::DIV) ? eax : edx,
1274 left->reg(),
1275 right->reg(),
1276 overwrite_mode);
1277 if (left->reg().is(right->reg())) {
1278 __ test(left->reg(), Immediate(kSmiTagMask));
1279 } else {
1280 // Use the quotient register as a scratch for the tag check.
1281 if (!left_is_in_eax) __ mov(eax, left->reg());
1282 left_is_in_eax = false; // About to destroy the value in eax.
1283 __ or_(eax, Operand(right->reg()));
1284 ASSERT(kSmiTag == 0); // Adjust test if not the case.
1285 __ test(eax, Immediate(kSmiTagMask));
1286 }
1287 deferred->Branch(not_zero);
1288
1289 if (!left_is_in_eax) __ mov(eax, left->reg());
1290 // Sign extend eax into edx:eax.
1291 __ cdq();
1292 // Check for 0 divisor.
1293 __ test(right->reg(), Operand(right->reg()));
1294 deferred->Branch(zero);
1295 // Divide edx:eax by the right operand.
1296 __ idiv(right->reg());
1297
1298 // Complete the operation.
1299 if (op == Token::DIV) {
1300 // Check for negative zero result. If result is zero, and divisor
1301 // is negative, return a floating point negative zero. The
1302 // virtual frame is unchanged in this block, so local control flow
1303 // can use a Label rather than a JumpTarget.
1304 Label non_zero_result;
1305 __ test(left->reg(), Operand(left->reg()));
1306 __ j(not_zero, &non_zero_result);
1307 __ test(right->reg(), Operand(right->reg()));
1308 deferred->Branch(negative);
1309 __ bind(&non_zero_result);
1310 // Check for the corner case of dividing the most negative smi by
1311 // -1. We cannot use the overflow flag, since it is not set by
1312 // idiv instruction.
1313 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1314 __ cmp(eax, 0x40000000);
1315 deferred->Branch(equal);
1316 // Check that the remainder is zero.
1317 __ test(edx, Operand(edx));
1318 deferred->Branch(not_zero);
1319 // Tag the result and store it in the quotient register.
1320 __ SmiTag(eax);
1321 deferred->BindExit();
1322 left->Unuse();
1323 right->Unuse();
1324 answer = quotient;
1325 } else {
1326 ASSERT(op == Token::MOD);
1327 // Check for a negative zero result. If the result is zero, and
1328 // the dividend is negative, return a floating point negative
1329 // zero. The frame is unchanged in this block, so local control
1330 // flow can use a Label rather than a JumpTarget.
1331 Label non_zero_result;
1332 __ test(edx, Operand(edx));
1333 __ j(not_zero, &non_zero_result, taken);
1334 __ test(left->reg(), Operand(left->reg()));
1335 deferred->Branch(negative);
1336 __ bind(&non_zero_result);
1337 deferred->BindExit();
1338 left->Unuse();
1339 right->Unuse();
1340 answer = remainder;
1341 }
1342 ASSERT(answer.is_valid());
1343 return answer;
1344 }
1345
1346 // Special handling of shift operations because they use fixed
1347 // registers.
1348 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
1349 // Move left out of ecx if necessary.
1350 if (left->is_register() && left->reg().is(ecx)) {
1351 *left = allocator_->Allocate();
1352 ASSERT(left->is_valid());
1353 __ mov(left->reg(), ecx);
1354 }
1355 right->ToRegister(ecx);
1356 left->ToRegister();
1357 ASSERT(left->is_register() && !left->reg().is(ecx));
1358 ASSERT(right->is_register() && right->reg().is(ecx));
1359
1360 // We will modify right, it must be spilled.
1361 frame_->Spill(ecx);
1362
1363 // Use a fresh answer register to avoid spilling the left operand.
1364 answer = allocator_->Allocate();
1365 ASSERT(answer.is_valid());
1366 // Check that both operands are smis using the answer register as a
1367 // temporary.
1368 DeferredInlineBinaryOperation* deferred =
1369 new DeferredInlineBinaryOperation(op,
1370 answer.reg(),
1371 left->reg(),
1372 ecx,
1373 overwrite_mode);
1374 __ mov(answer.reg(), left->reg());
1375 __ or_(answer.reg(), Operand(ecx));
1376 __ test(answer.reg(), Immediate(kSmiTagMask));
1377 deferred->Branch(not_zero);
1378
1379 // Untag both operands.
1380 __ mov(answer.reg(), left->reg());
1381 __ SmiUntag(answer.reg());
1382 __ SmiUntag(ecx);
1383 // Perform the operation.
1384 switch (op) {
1385 case Token::SAR:
1386 __ sar_cl(answer.reg());
1387 // No checks of result necessary
1388 break;
1389 case Token::SHR: {
1390 Label result_ok;
1391 __ shr_cl(answer.reg());
1392 // Check that the *unsigned* result fits in a smi. Neither of
1393 // the two high-order bits can be set:
1394 // * 0x80000000: high bit would be lost when smi tagging.
1395 // * 0x40000000: this number would convert to negative when smi
1396 // tagging.
1397 // These two cases can only happen with shifts by 0 or 1 when
1398 // handed a valid smi. If the answer cannot be represented by a
1399 // smi, restore the left and right arguments, and jump to slow
1400 // case. The low bit of the left argument may be lost, but only
1401 // in a case where it is dropped anyway.
1402 __ test(answer.reg(), Immediate(0xc0000000));
1403 __ j(zero, &result_ok);
1404 __ SmiTag(ecx);
1405 deferred->Jump();
1406 __ bind(&result_ok);
1407 break;
1408 }
1409 case Token::SHL: {
1410 Label result_ok;
1411 __ shl_cl(answer.reg());
1412 // Check that the *signed* result fits in a smi.
1413 __ cmp(answer.reg(), 0xc0000000);
1414 __ j(positive, &result_ok);
1415 __ SmiTag(ecx);
1416 deferred->Jump();
1417 __ bind(&result_ok);
1418 break;
1419 }
1420 default:
1421 UNREACHABLE();
1422 }
1423 // Smi-tag the result in answer.
1424 __ SmiTag(answer.reg());
1425 deferred->BindExit();
1426 left->Unuse();
1427 right->Unuse();
1428 ASSERT(answer.is_valid());
1429 return answer;
1430 }
1431
1432 // Handle the other binary operations.
1433 left->ToRegister();
1434 right->ToRegister();
1435 // A newly allocated register answer is used to hold the answer. The
1436 // registers containing left and right are not modified so they don't
1437 // need to be spilled in the fast case.
1438 answer = allocator_->Allocate();
1439 ASSERT(answer.is_valid());
1440
1441 // Perform the smi tag check.
1442 DeferredInlineBinaryOperation* deferred =
1443 new DeferredInlineBinaryOperation(op,
1444 answer.reg(),
1445 left->reg(),
1446 right->reg(),
1447 overwrite_mode);
1448 if (left->reg().is(right->reg())) {
1449 __ test(left->reg(), Immediate(kSmiTagMask));
1450 } else {
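      // Or the operands together; the result has the smi tag bit set iff
      // at least one of them is not a smi, so a single test covers both.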
1451 __ mov(answer.reg(), left->reg());
1452 __ or_(answer.reg(), Operand(right->reg()));
1453 ASSERT(kSmiTag == 0); // Adjust test if not the case.
1454 __ test(answer.reg(), Immediate(kSmiTagMask));
1455 }
1456 deferred->Branch(not_zero);
1457 __ mov(answer.reg(), left->reg());
1458 switch (op) {
1459 case Token::ADD:
1460 __ add(answer.reg(), Operand(right->reg()));
1461 deferred->Branch(overflow);
1462 break;
1463
1464 case Token::SUB:
1465 __ sub(answer.reg(), Operand(right->reg()));
1466 deferred->Branch(overflow);
1467 break;
1468
1469 case Token::MUL: {
1470 // If the smi tag is 0 we can just leave the tag on one operand.
1471 ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1472 // Remove smi tag from the left operand (but keep sign).
1473 // Left-hand operand has been copied into answer.
1474 __ SmiUntag(answer.reg());
1475 // Do multiplication of smis, leaving result in answer.
1476 __ imul(answer.reg(), Operand(right->reg()));
1477 // Go slow on overflows.
1478 deferred->Branch(overflow);
1479 // Check for negative zero result. If product is zero, and one
1480 // argument is negative, go to slow case. The frame is unchanged
1481 // in this block, so local control flow can use a Label rather
1482 // than a JumpTarget.
1483 Label non_zero_result;
1484 __ test(answer.reg(), Operand(answer.reg()));
1485 __ j(not_zero, &non_zero_result, taken);
1486 __ mov(answer.reg(), left->reg());
1487 __ or_(answer.reg(), Operand(right->reg()));
1488 deferred->Branch(negative);
1489 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
1490 __ bind(&non_zero_result);
1491 break;
1492 }
1493
1494 case Token::BIT_OR:
1495 __ or_(answer.reg(), Operand(right->reg()));
1496 break;
1497
1498 case Token::BIT_AND:
1499 __ and_(answer.reg(), Operand(right->reg()));
1500 break;
1501
1502 case Token::BIT_XOR:
1503 __ xor_(answer.reg(), Operand(right->reg()));
1504 break;
1505
1506 default:
1507 UNREACHABLE();
1508 break;
1509 }
1510 deferred->BindExit();
1511 left->Unuse();
1512 right->Unuse();
1513 ASSERT(answer.is_valid());
1514 return answer;
1515 }
1516
1517
1518 // Call the appropriate binary operation stub to compute src op value
1519 // and leave the result in dst.
1520 class DeferredInlineSmiOperation: public DeferredCode {
1521 public:
1522 DeferredInlineSmiOperation(Token::Value op,
1523 Register dst,
1524 Register src,
1525 Smi* value,
1526 OverwriteMode overwrite_mode)
1527 : op_(op),
1528 dst_(dst),
1529 src_(src),
1530 value_(value),
1531 overwrite_mode_(overwrite_mode) {
1532 set_comment("[ DeferredInlineSmiOperation");
1533 }
1534
1535 virtual void Generate();
1536
1537 private:
1538 Token::Value op_;
1539 Register dst_;
1540 Register src_;
1541 Smi* value_;
1542 OverwriteMode overwrite_mode_;
1543 };
1544
1545
1546 void DeferredInlineSmiOperation::Generate() {
1547 // For mod we don't generate all the Smi code inline.
1548 GenericBinaryOpStub stub(
1549 op_,
1550 overwrite_mode_,
1551 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
1552 stub.GenerateCall(masm_, src_, value_);
1553 if (!dst_.is(eax)) __ mov(dst_, eax);
1554 }
1555
1556
1557 // Call the appropriate binary operation stub to compute value op src
1558 // and leave the result in dst.
1559 class DeferredInlineSmiOperationReversed: public DeferredCode {
1560 public:
1561 DeferredInlineSmiOperationReversed(Token::Value op,
1562 Register dst,
1563 Smi* value,
1564 Register src,
1565 OverwriteMode overwrite_mode)
1566 : op_(op),
1567 dst_(dst),
1568 value_(value),
1569 src_(src),
1570 overwrite_mode_(overwrite_mode) {
1571 set_comment("[ DeferredInlineSmiOperationReversed");
1572 }
1573
1574 virtual void Generate();
1575
1576 private:
1577 Token::Value op_;
1578 Register dst_;
1579 Smi* value_;
1580 Register src_;
1581 OverwriteMode overwrite_mode_;
1582 };
1583
1584
1585 void DeferredInlineSmiOperationReversed::Generate() {
1586 GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
1587 igostub.GenerateCall(masm_, value_, src_);
1588 if (!dst_.is(eax)) __ mov(dst_, eax);
1589 }
1590
1591
1592 // The result of src + value is in dst. It either overflowed or was not
1593 // smi tagged. Undo the speculative addition and call the appropriate
1594 // specialized stub for add. The result is left in dst.
1595 class DeferredInlineSmiAdd: public DeferredCode {
1596 public:
1597 DeferredInlineSmiAdd(Register dst,
1598 Smi* value,
1599 OverwriteMode overwrite_mode)
1600 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1601 set_comment("[ DeferredInlineSmiAdd");
1602 }
1603
1604 virtual void Generate();
1605
1606 private:
1607 Register dst_;
1608 Smi* value_;
1609 OverwriteMode overwrite_mode_;
1610 };
1611
1612
1613 void DeferredInlineSmiAdd::Generate() {
1614 // Undo the optimistic add operation and call the shared stub.
1615 __ sub(Operand(dst_), Immediate(value_));
1616 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
1617 igostub.GenerateCall(masm_, dst_, value_);
1618 if (!dst_.is(eax)) __ mov(dst_, eax);
1619 }
1620
1621
1622 // The result of value + src is in dst. It either overflowed or was not
1623 // smi tagged. Undo the speculative addition and call the appropriate
1624 // specialized stub for add. The result is left in dst.
1625 class DeferredInlineSmiAddReversed: public DeferredCode {
1626 public:
1627   DeferredInlineSmiAddReversed(Register dst,
1628 Smi* value,
1629 OverwriteMode overwrite_mode)
1630 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1631 set_comment("[ DeferredInlineSmiAddReversed");
1632 }
1633
1634 virtual void Generate();
1635
1636 private:
1637 Register dst_;
1638 Smi* value_;
1639 OverwriteMode overwrite_mode_;
1640 };
1641
1642
1643 void DeferredInlineSmiAddReversed::Generate() {
1644 // Undo the optimistic add operation and call the shared stub.
1645 __ sub(Operand(dst_), Immediate(value_));
1646 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
1647 igostub.GenerateCall(masm_, value_, dst_);
1648 if (!dst_.is(eax)) __ mov(dst_, eax);
1649 }
1650
1651
1652 // The result of src - value is in dst. It either overflowed or was not
1653 // smi tagged. Undo the speculative subtraction and call the
1654 // appropriate specialized stub for subtract. The result is left in
1655 // dst.
1656 class DeferredInlineSmiSub: public DeferredCode {
1657 public:
1658   DeferredInlineSmiSub(Register dst,
1659 Smi* value,
1660 OverwriteMode overwrite_mode)
1661 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1662 set_comment("[ DeferredInlineSmiSub");
1663 }
1664
1665 virtual void Generate();
1666
1667 private:
1668 Register dst_;
1669 Smi* value_;
1670 OverwriteMode overwrite_mode_;
1671 };
1672
1673
1674 void DeferredInlineSmiSub::Generate() {
1675 // Undo the optimistic sub operation and call the shared stub.
1676 __ add(Operand(dst_), Immediate(value_));
1677 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
1678 igostub.GenerateCall(masm_, dst_, value_);
1679 if (!dst_.is(eax)) __ mov(dst_, eax);
1680 }
1681
1682
1683 Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
1684 Result* operand,
1685 Handle<Object> value,
1686 StaticType* type,
1687 bool reversed,
1688 OverwriteMode overwrite_mode) {
1689 // NOTE: This is an attempt to inline (a bit) more of the code for
1690 // some possible smi operations (like + and -) when (at least) one
1691 // of the operands is a constant smi.
1692 // Consumes the argument "operand".
1693 // TODO(199): Optimize some special cases of operations involving a
1694 // smi literal (multiply by 2, shift by 0, etc.).
1695 if (IsUnsafeSmi(value)) {
1696 Result unsafe_operand(value);
1697 if (reversed) {
1698 return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
1699 overwrite_mode);
1700 } else {
1701 return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
1702 overwrite_mode);
1703 }
1704 }
1705
1706 // Get the literal value.
1707 Smi* smi_value = Smi::cast(*value);
1708 int int_value = smi_value->value();
1709
1710 Result answer;
1711 switch (op) {
1712 case Token::ADD: {
1713 operand->ToRegister();
1714 frame_->Spill(operand->reg());
1715
1716 // Optimistically add. Call the specialized add stub if the
1717 // result is not a smi or overflows.
1718 DeferredCode* deferred = NULL;
1719 if (reversed) {
1720 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
1721 smi_value,
1722 overwrite_mode);
1723 } else {
1724 deferred = new DeferredInlineSmiAdd(operand->reg(),
1725 smi_value,
1726 overwrite_mode);
1727 }
1728 __ add(Operand(operand->reg()), Immediate(value));
1729 deferred->Branch(overflow);
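      // The sum of two tagged smis is itself a tagged smi, so a set tag
      // bit here means the register operand was not a smi after all.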
1730 __ test(operand->reg(), Immediate(kSmiTagMask));
1731 deferred->Branch(not_zero);
1732 deferred->BindExit();
1733 answer = *operand;
1734 break;
1735 }
1736
1737 case Token::SUB: {
1738 DeferredCode* deferred = NULL;
1739 if (reversed) {
1740 // The reversed case is only hit when the right operand is not a
1741 // constant.
1742 ASSERT(operand->is_register());
1743 answer = allocator()->Allocate();
1744 ASSERT(answer.is_valid());
1745 __ Set(answer.reg(), Immediate(value));
1746 deferred = new DeferredInlineSmiOperationReversed(op,
1747 answer.reg(),
1748 smi_value,
1749 operand->reg(),
1750 overwrite_mode);
1751 __ sub(answer.reg(), Operand(operand->reg()));
1752 } else {
1753 operand->ToRegister();
1754 frame_->Spill(operand->reg());
1755 answer = *operand;
1756 deferred = new DeferredInlineSmiSub(operand->reg(),
1757 smi_value,
1758 overwrite_mode);
1759 __ sub(Operand(operand->reg()), Immediate(value));
1760 }
1761 deferred->Branch(overflow);
1762 __ test(answer.reg(), Immediate(kSmiTagMask));
1763 deferred->Branch(not_zero);
1764 deferred->BindExit();
1765 operand->Unuse();
1766 break;
1767 }
1768
1769 case Token::SAR:
1770 if (reversed) {
1771 Result constant_operand(value);
1772 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
1773 overwrite_mode);
1774 } else {
1775 // Only the least significant 5 bits of the shift value are used.
1776 // In the slow case, this masking is done inside the runtime call.
1777 int shift_value = int_value & 0x1f;
1778 operand->ToRegister();
1779 frame_->Spill(operand->reg());
1780 DeferredInlineSmiOperation* deferred =
1781 new DeferredInlineSmiOperation(op,
1782 operand->reg(),
1783 operand->reg(),
1784 smi_value,
1785 overwrite_mode);
1786 __ test(operand->reg(), Immediate(kSmiTagMask));
1787 deferred->Branch(not_zero);
1788 if (shift_value > 0) {
1789 __ sar(operand->reg(), shift_value);
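          // The arithmetic shift may bring a one into the tag bit
          // position; clear it so the result stays a correctly tagged smi.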
1790 __ and_(operand->reg(), ~kSmiTagMask);
1791 }
1792 deferred->BindExit();
1793 answer = *operand;
1794 }
1795 break;
1796
1797 case Token::SHR:
1798 if (reversed) {
1799 Result constant_operand(value);
1800 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
1801 overwrite_mode);
1802 } else {
1803 // Only the least significant 5 bits of the shift value are used.
1804 // In the slow case, this masking is done inside the runtime call.
1805 int shift_value = int_value & 0x1f;
1806 operand->ToRegister();
1807 answer = allocator()->Allocate();
1808 ASSERT(answer.is_valid());
1809 DeferredInlineSmiOperation* deferred =
1810 new DeferredInlineSmiOperation(op,
1811 answer.reg(),
1812 operand->reg(),
1813 smi_value,
1814 overwrite_mode);
1815 __ test(operand->reg(), Immediate(kSmiTagMask));
1816 deferred->Branch(not_zero);
1817 __ mov(answer.reg(), operand->reg());
1818 __ SmiUntag(answer.reg());
1819 __ shr(answer.reg(), shift_value);
1820         // A shift right by two or more always yields a value in the
         // positive Smi range; smaller shifts may not, so check the top two bits.
1821 if (shift_value < 2) {
1822 __ test(answer.reg(), Immediate(0xc0000000));
1823 deferred->Branch(not_zero);
1824 }
1825 operand->Unuse();
1826 __ SmiTag(answer.reg());
1827 deferred->BindExit();
1828 }
1829 break;
1830
1831 case Token::SHL:
1832 if (reversed) {
1833 Result right;
1834 Result right_copy_in_ecx;
1835
1836 // Make sure to get a copy of the right operand into ecx. This
1837 // allows us to modify it without having to restore it in the
1838 // deferred code.
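        // (shl_cl below shifts by the count in cl, so the variable shift
        //  amount must end up in ecx.)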
1839 operand->ToRegister();
1840 if (operand->reg().is(ecx)) {
1841 right = allocator()->Allocate();
1842 __ mov(right.reg(), ecx);
1843 frame_->Spill(ecx);
1844 right_copy_in_ecx = *operand;
1845 } else {
1846 right_copy_in_ecx = allocator()->Allocate(ecx);
1847 __ mov(ecx, operand->reg());
1848 right = *operand;
1849 }
1850 operand->Unuse();
1851
1852 answer = allocator()->Allocate();
1853 DeferredInlineSmiOperationReversed* deferred =
1854 new DeferredInlineSmiOperationReversed(op,
1855 answer.reg(),
1856 smi_value,
1857 right.reg(),
1858 overwrite_mode);
1859 __ mov(answer.reg(), Immediate(int_value));
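        // Untag the shift count: sar moves the smi tag bit into the carry
        // flag, so a set carry means the count was not a smi.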
1860 __ sar(ecx, kSmiTagSize);
1861 deferred->Branch(carry);
1862 __ shl_cl(answer.reg());
1863 __ cmp(answer.reg(), 0xc0000000);
1864 deferred->Branch(sign);
1865 __ SmiTag(answer.reg());
1866
1867 deferred->BindExit();
1868 } else {
1869 // Only the least significant 5 bits of the shift value are used.
1870 // In the slow case, this masking is done inside the runtime call.
1871 int shift_value = int_value & 0x1f;
1872 operand->ToRegister();
1873 if (shift_value == 0) {
1874 // Spill operand so it can be overwritten in the slow case.
1875 frame_->Spill(operand->reg());
1876 DeferredInlineSmiOperation* deferred =
1877 new DeferredInlineSmiOperation(op,
1878 operand->reg(),
1879 operand->reg(),
1880 smi_value,
1881 overwrite_mode);
1882 __ test(operand->reg(), Immediate(kSmiTagMask));
1883 deferred->Branch(not_zero);
1884 deferred->BindExit();
1885 answer = *operand;
1886 } else {
1887 // Use a fresh temporary for nonzero shift values.
1888 answer = allocator()->Allocate();
1889 ASSERT(answer.is_valid());
1890 DeferredInlineSmiOperation* deferred =
1891 new DeferredInlineSmiOperation(op,
1892 answer.reg(),
1893 operand->reg(),
1894 smi_value,
1895 overwrite_mode);
1896 __ test(operand->reg(), Immediate(kSmiTagMask));
1897 deferred->Branch(not_zero);
1898 __ mov(answer.reg(), operand->reg());
1899 ASSERT(kSmiTag == 0); // adjust code if not the case
1900 // We do no shifts, only the Smi conversion, if shift_value is 1.
1901 if (shift_value > 1) {
1902 __ shl(answer.reg(), shift_value - 1);
1903 }
1904 // Convert int result to Smi, checking that it is in int range.
1905 ASSERT(kSmiTagSize == 1); // adjust code if not the case
1906 __ add(answer.reg(), Operand(answer.reg()));
1907 deferred->Branch(overflow);
1908 deferred->BindExit();
1909 operand->Unuse();
1910 }
1911 }
1912 break;
1913
1914 case Token::BIT_OR:
1915 case Token::BIT_XOR:
1916 case Token::BIT_AND: {
1917 operand->ToRegister();
1918 frame_->Spill(operand->reg());
1919 DeferredCode* deferred = NULL;
1920 if (reversed) {
1921 deferred = new DeferredInlineSmiOperationReversed(op,
1922 operand->reg(),
1923 smi_value,
1924 operand->reg(),
1925 overwrite_mode);
1926 } else {
1927 deferred = new DeferredInlineSmiOperation(op,
1928 operand->reg(),
1929 operand->reg(),
1930 smi_value,
1931 overwrite_mode);
1932 }
1933 __ test(operand->reg(), Immediate(kSmiTagMask));
1934 deferred->Branch(not_zero);
1935 if (op == Token::BIT_AND) {
1936 __ and_(Operand(operand->reg()), Immediate(value));
1937 } else if (op == Token::BIT_XOR) {
1938 if (int_value != 0) {
1939 __ xor_(Operand(operand->reg()), Immediate(value));
1940 }
1941 } else {
1942 ASSERT(op == Token::BIT_OR);
1943 if (int_value != 0) {
1944 __ or_(Operand(operand->reg()), Immediate(value));
1945 }
1946 }
1947 deferred->BindExit();
1948 answer = *operand;
1949 break;
1950 }
1951
1952 case Token::DIV:
1953 if (!reversed && int_value == 2) {
1954 operand->ToRegister();
1955 frame_->Spill(operand->reg());
1956
1957 DeferredInlineSmiOperation* deferred =
1958 new DeferredInlineSmiOperation(op,
1959 operand->reg(),
1960 operand->reg(),
1961 smi_value,
1962 overwrite_mode);
1963 // Check that lowest log2(value) bits of operand are zero, and test
1964 // smi tag at the same time.
1965 ASSERT_EQ(0, kSmiTag);
1966 ASSERT_EQ(1, kSmiTagSize);
1967 __ test(operand->reg(), Immediate(3));
1968 deferred->Branch(not_zero); // Branch if non-smi or odd smi.
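        // An even smi value 2*n is tagged as 4*n, with both low bits clear;
        // an arithmetic shift right by one then yields the tagged smi for n.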
1969 __ sar(operand->reg(), 1);
1970 deferred->BindExit();
1971 answer = *operand;
1972 } else {
1973 // Cannot fall through MOD to default case, so we duplicate the
1974 // default case here.
1975 Result constant_operand(value);
1976 if (reversed) {
1977 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
1978 overwrite_mode);
1979 } else {
1980 answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
1981 overwrite_mode);
1982 }
1983 }
1984 break;
1985 // Generate inline code for mod of powers of 2 and negative powers of 2.
1986 case Token::MOD:
1987 if (!reversed &&
1988 int_value != 0 &&
1989 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
1990 operand->ToRegister();
1991 frame_->Spill(operand->reg());
1992 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
1993 operand->reg(),
1994 operand->reg(),
1995 smi_value,
1996 overwrite_mode);
1997 // Check for negative or non-Smi left hand side.
1998 __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
1999 deferred->Branch(not_zero);
2000 if (int_value < 0) int_value = -int_value;
2001 if (int_value == 1) {
2002 __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
2003 } else {
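          // For a non-negative smi x and power-of-two modulus m,
          // x % m == x & (m - 1); masking the tagged value with
          // (m << kSmiTagSize) - 1 leaves a correctly tagged result
          // because the tag bit is zero.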
2004 __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
2005 }
2006 deferred->BindExit();
2007 answer = *operand;
2008 break;
2009 }
2010 // Fall through if we did not find a power of 2 on the right hand side!
2011
2012 default: {
2013 Result constant_operand(value);
2014 if (reversed) {
2015 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
2016 overwrite_mode);
2017 } else {
2018 answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
2019 overwrite_mode);
2020 }
2021 break;
2022 }
2023 }
2024 ASSERT(answer.is_valid());
2025 return answer;
2026 }
2027
2028
2029 static bool CouldBeNaN(const Result& result) {
2030 if (!result.is_constant()) return true;
2031 if (!result.handle()->IsHeapNumber()) return false;
2032 return isnan(HeapNumber::cast(*result.handle())->value());
2033 }
2034
2035
2036 void CodeGenerator::Comparison(AstNode* node,
2037 Condition cc,
2038 bool strict,
2039 ControlDestination* dest) {
2040 // Strict only makes sense for equality comparisons.
2041 ASSERT(!strict || cc == equal);
2042
2043 Result left_side;
2044 Result right_side;
2045 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
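  // ('a > b' is compiled as 'b < a' and 'a <= b' as 'b >= a', with the
  // operands popped in swapped order, so only less, equal, and
  // greater_equal remain below.)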
2046 if (cc == greater || cc == less_equal) {
2047 cc = ReverseCondition(cc);
2048 left_side = frame_->Pop();
2049 right_side = frame_->Pop();
2050 } else {
2051 right_side = frame_->Pop();
2052 left_side = frame_->Pop();
2053 }
2054 ASSERT(cc == less || cc == equal || cc == greater_equal);
2055
2056 // If either side is a constant of some sort, we can probably optimize the
2057 // comparison.
2058 bool left_side_constant_smi = false;
2059 bool left_side_constant_null = false;
2060 bool left_side_constant_1_char_string = false;
2061 if (left_side.is_constant()) {
2062 left_side_constant_smi = left_side.handle()->IsSmi();
2063 left_side_constant_null = left_side.handle()->IsNull();
2064 left_side_constant_1_char_string =
2065 (left_side.handle()->IsString() &&
2066 (String::cast(*left_side.handle())->length() == 1));
2067 }
2068 bool right_side_constant_smi = false;
2069 bool right_side_constant_null = false;
2070 bool right_side_constant_1_char_string = false;
2071 if (right_side.is_constant()) {
2072 right_side_constant_smi = right_side.handle()->IsSmi();
2073 right_side_constant_null = right_side.handle()->IsNull();
2074 right_side_constant_1_char_string =
2075 (right_side.handle()->IsString() &&
2076 (String::cast(*right_side.handle())->length() == 1));
2077 }
2078
2079 if (left_side_constant_smi || right_side_constant_smi) {
2080 if (left_side_constant_smi && right_side_constant_smi) {
2081 // Trivial case, comparing two constants.
2082 int left_value = Smi::cast(*left_side.handle())->value();
2083 int right_value = Smi::cast(*right_side.handle())->value();
2084 switch (cc) {
2085 case less:
2086 dest->Goto(left_value < right_value);
2087 break;
2088 case equal:
2089 dest->Goto(left_value == right_value);
2090 break;
2091 case greater_equal:
2092 dest->Goto(left_value >= right_value);
2093 break;
2094 default:
2095 UNREACHABLE();
2096 }
2097 } else {
2098 // Only one side is a constant Smi.
2099 // If left side is a constant Smi, reverse the operands.
2100 // Since one side is a constant Smi, conversion order does not matter.
2101 if (left_side_constant_smi) {
2102 Result temp = left_side;
2103 left_side = right_side;
2104 right_side = temp;
2105 cc = ReverseCondition(cc);
2106 // This may reintroduce greater or less_equal as the value of cc.
2107 // CompareStub and the inline code both support all values of cc.
2108 }
2109 // Implement comparison against a constant Smi, inlining the case
2110 // where both sides are Smis.
2111 left_side.ToRegister();
2112 Register left_reg = left_side.reg();
2113 Handle<Object> right_val = right_side.handle();
2114
2115 // Here we split control flow to the stub call and inlined cases
2116 // before finally splitting it to the control destination. We use
2117 // a jump target and branching to duplicate the virtual frame at
2118 // the first split. We manually handle the off-frame references
2119 // by reconstituting them on the non-fall-through path.
2120 JumpTarget is_smi;
2121 __ test(left_side.reg(), Immediate(kSmiTagMask));
2122 is_smi.Branch(zero, taken);
2123
2124 bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
2125 && node->AsCompareOperation()->is_for_loop_condition();
2126 if (!is_for_loop_compare
2127 && CpuFeatures::IsSupported(SSE2)
2128 && right_val->IsSmi()) {
2129 // Right side is a constant smi and left side has been checked
2130 // not to be a smi.
2131 CpuFeatures::Scope use_sse2(SSE2);
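          // The heap number on the left goes into xmm1 and the constant,
          // converted to a double, into xmm0; comisd sets the parity flag
          // for an unordered (NaN) result, which is routed to the stub.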
2132 JumpTarget not_number;
2133 __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
2134 Immediate(Factory::heap_number_map()));
2135 not_number.Branch(not_equal, &left_side);
2136 __ movdbl(xmm1,
2137 FieldOperand(left_reg, HeapNumber::kValueOffset));
2138 int value = Smi::cast(*right_val)->value();
2139 if (value == 0) {
2140 __ xorpd(xmm0, xmm0);
2141 } else {
2142 Result temp = allocator()->Allocate();
2143 __ mov(temp.reg(), Immediate(value));
2144 __ cvtsi2sd(xmm0, Operand(temp.reg()));
2145 temp.Unuse();
2146 }
2147 __ comisd(xmm1, xmm0);
2148 // Jump to builtin for NaN.
2149 not_number.Branch(parity_even, &left_side);
2150 left_side.Unuse();
2151 Condition double_cc = cc;
2152 switch (cc) {
2153 case less: double_cc = below; break;
2154 case equal: double_cc = equal; break;
2155 case less_equal: double_cc = below_equal; break;
2156 case greater: double_cc = above; break;
2157 case greater_equal: double_cc = above_equal; break;
2158 default: UNREACHABLE();
2159 }
2160 dest->true_target()->Branch(double_cc);
2161 dest->false_target()->Jump();
2162 not_number.Bind(&left_side);
2163 }
2164
2165       // Set up and call the compare stub.
2166 CompareStub stub(cc, strict, kCantBothBeNaN);
2167 Result result = frame_->CallStub(&stub, &left_side, &right_side);
2168 result.ToRegister();
2169 __ cmp(result.reg(), 0);
2170 result.Unuse();
2171 dest->true_target()->Branch(cc);
2172 dest->false_target()->Jump();
2173
2174 is_smi.Bind();
2175 left_side = Result(left_reg);
2176 right_side = Result(right_val);
2177 // Test smi equality and comparison by signed int comparison.
2178 if (IsUnsafeSmi(right_side.handle())) {
2179 right_side.ToRegister();
2180 __ cmp(left_side.reg(), Operand(right_side.reg()));
2181 } else {
2182 __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
2183 }
2184 left_side.Unuse();
2185 right_side.Unuse();
2186 dest->Split(cc);
2187 }
2188
2189 } else if (cc == equal &&
2190 (left_side_constant_null || right_side_constant_null)) {
2191 // To make null checks efficient, we check if either the left side or
2192 // the right side is the constant 'null'.
2193 // If so, we optimize the code by inlining a null check instead of
2194 // calling the (very) general runtime routine for checking equality.
2195 Result operand = left_side_constant_null ? right_side : left_side;
2196 right_side.Unuse();
2197 left_side.Unuse();
2198 operand.ToRegister();
2199 __ cmp(operand.reg(), Factory::null_value());
2200 if (strict) {
2201 operand.Unuse();
2202 dest->Split(equal);
2203 } else {
2204 // The 'null' value is only equal to 'undefined' if using non-strict
2205 // comparisons.
2206 dest->true_target()->Branch(equal);
2207 __ cmp(operand.reg(), Factory::undefined_value());
2208 dest->true_target()->Branch(equal);
2209 __ test(operand.reg(), Immediate(kSmiTagMask));
2210 dest->false_target()->Branch(equal);
2211
2212 // It can be an undetectable object.
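      // (Objects marked undetectable compare equal to null and undefined
      //  under non-strict equality, hence the extra map bit check.)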
2213 // Use a scratch register in preference to spilling operand.reg().
2214 Result temp = allocator()->Allocate();
2215 ASSERT(temp.is_valid());
2216 __ mov(temp.reg(),
2217 FieldOperand(operand.reg(), HeapObject::kMapOffset));
2218 __ movzx_b(temp.reg(),
2219 FieldOperand(temp.reg(), Map::kBitFieldOffset));
2220 __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
2221 temp.Unuse();
2222 operand.Unuse();
2223 dest->Split(not_zero);
2224 }
2225 } else if (left_side_constant_1_char_string ||
2226 right_side_constant_1_char_string) {
2227 if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
2228 // Trivial case, comparing two constants.
2229 int left_value = String::cast(*left_side.handle())->Get(0);
2230 int right_value = String::cast(*right_side.handle())->Get(0);
2231 switch (cc) {
2232 case less:
2233 dest->Goto(left_value < right_value);
2234 break;
2235 case equal:
2236 dest->Goto(left_value == right_value);
2237 break;
2238 case greater_equal:
2239 dest->Goto(left_value >= right_value);
2240 break;
2241 default:
2242 UNREACHABLE();
2243 }
2244 } else {
2245 // Only one side is a constant 1 character string.
2246 // If left side is a constant 1-character string, reverse the operands.
2247 // Since one side is a constant string, conversion order does not matter.
2248 if (left_side_constant_1_char_string) {
2249 Result temp = left_side;
2250 left_side = right_side;
2251 right_side = temp;
2252 cc = ReverseCondition(cc);
2253 // This may reintroduce greater or less_equal as the value of cc.
2254 // CompareStub and the inline code both support all values of cc.
2255 }
2256 // Implement comparison against a constant string, inlining the case
2257 // where both sides are strings.
2258 left_side.ToRegister();
2259
2260 // Here we split control flow to the stub call and inlined cases
2261 // before finally splitting it to the control destination. We use
2262 // a jump target and branching to duplicate the virtual frame at
2263 // the first split. We manually handle the off-frame references
2264 // by reconstituting them on the non-fall-through path.
2265 JumpTarget is_not_string, is_string;
2266 Register left_reg = left_side.reg();
2267 Handle<Object> right_val = right_side.handle();
2268 __ test(left_side.reg(), Immediate(kSmiTagMask));
2269 is_not_string.Branch(zero, &left_side);
2270 Result temp = allocator_->Allocate();
2271 ASSERT(temp.is_valid());
2272 __ mov(temp.reg(),
2273 FieldOperand(left_side.reg(), HeapObject::kMapOffset));
2274 __ movzx_b(temp.reg(),
2275 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
2276 // If we are testing for equality then make use of the symbol shortcut.
2277       // Check whether the left hand side has the same type as the right
2278       // hand side (which is always a symbol).
2279 if (cc == equal) {
2280 Label not_a_symbol;
2281 ASSERT(kSymbolTag != 0);
2282 // Ensure that no non-strings have the symbol bit set.
2283 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
2284 __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
2285         __ j(zero, &not_a_symbol);
2286 // They are symbols, so do identity compare.
2287 __ cmp(left_side.reg(), right_side.handle());
2288 dest->true_target()->Branch(equal);
2289 dest->false_target()->Branch(not_equal);
2290         __ bind(&not_a_symbol);
2291 }
2292 // If the receiver is not a string of the type we handle call the stub.
2293 __ and_(temp.reg(),
2294 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
2295 __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
2296 temp.Unuse();
2297 is_string.Branch(equal, &left_side);
2298
2299       // Set up and call the compare stub.
2300 is_not_string.Bind(&left_side);
2301 CompareStub stub(cc, strict, kCantBothBeNaN);
2302 Result result = frame_->CallStub(&stub, &left_side, &right_side);
2303 result.ToRegister();
2304 __ cmp(result.reg(), 0);
2305 result.Unuse();
2306 dest->true_target()->Branch(cc);
2307 dest->false_target()->Jump();
2308
2309 is_string.Bind(&left_side);
2310 // Here we know we have a sequential ASCII string.
2311 left_side = Result(left_reg);
2312 right_side = Result(right_val);
2313 Result temp2 = allocator_->Allocate();
2314 ASSERT(temp2.is_valid());
2315 // Test string equality and comparison.
2316 if (cc == equal) {
2317 Label comparison_done;
2318 __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
2319 Immediate(1));
2320 __ j(not_equal, &comparison_done);
2321 uint8_t char_value =
2322 static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
2323 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
2324 char_value);
2325 __ bind(&comparison_done);
2326 } else {
2327 __ mov(temp2.reg(),
2328 FieldOperand(left_side.reg(), String::kLengthOffset));
2329 __ sub(Operand(temp2.reg()), Immediate(1));
2330 Label comparison;
2331 // If the length is 0 then our subtraction gave -1 which compares less
2332 // than any character.
2333 __ j(negative, &comparison);
2334 // Otherwise load the first character.
2335 __ movzx_b(temp2.reg(),
2336 FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
2337 __ bind(&comparison);
2338         // Compare the first character of the string with our constant
2339 // 1-character string.
2340 uint8_t char_value =
2341 static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
2342 __ cmp(Operand(temp2.reg()), Immediate(char_value));
2343 Label characters_were_different;
2344 __ j(not_equal, &characters_were_different);
2345 // If the first character is the same then the long string sorts after
2346 // the short one.
2347 __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
2348 Immediate(1));
2349 __ bind(&characters_were_different);
2350 }
2351 temp2.Unuse();
2352 left_side.Unuse();
2353 right_side.Unuse();
2354 dest->Split(cc);
2355 }
2356 } else {
2357 // Neither side is a constant Smi or null.
2358 // If either side is a non-smi constant, skip the smi check.
2359 bool known_non_smi =
2360 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
2361 (right_side.is_constant() && !right_side.handle()->IsSmi());
2362 NaNInformation nan_info =
2363 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
2364 kBothCouldBeNaN :
2365 kCantBothBeNaN;
2366 left_side.ToRegister();
2367 right_side.ToRegister();
2368
2369 if (known_non_smi) {
2370 // When non-smi, call out to the compare stub.
2371 CompareStub stub(cc, strict, nan_info);
2372 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
2373 if (cc == equal) {
2374 __ test(answer.reg(), Operand(answer.reg()));
2375 } else {
2376 __ cmp(answer.reg(), 0);
2377 }
2378 answer.Unuse();
2379 dest->Split(cc);
2380 } else {
2381 // Here we split control flow to the stub call and inlined cases
2382 // before finally splitting it to the control destination. We use
2383 // a jump target and branching to duplicate the virtual frame at
2384 // the first split. We manually handle the off-frame references
2385 // by reconstituting them on the non-fall-through path.
2386 JumpTarget is_smi;
2387 Register left_reg = left_side.reg();
2388 Register right_reg = right_side.reg();
2389
2390 Result temp = allocator_->Allocate();
2391 ASSERT(temp.is_valid());
2392 __ mov(temp.reg(), left_side.reg());
2393 __ or_(temp.reg(), Operand(right_side.reg()));
2394 __ test(temp.reg(), Immediate(kSmiTagMask));
2395 temp.Unuse();
2396 is_smi.Branch(zero, taken);
2397 // When non-smi, call out to the compare stub.
2398 CompareStub stub(cc, strict, nan_info);
2399 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
2400 if (cc == equal) {
2401 __ test(answer.reg(), Operand(answer.reg()));
2402 } else {
2403 __ cmp(answer.reg(), 0);
2404 }
2405 answer.Unuse();
2406 dest->true_target()->Branch(cc);
2407 dest->false_target()->Jump();
2408
2409 is_smi.Bind();
2410 left_side = Result(left_reg);
2411 right_side = Result(right_reg);
2412 __ cmp(left_side.reg(), Operand(right_side.reg()));
2413 right_side.Unuse();
2414 left_side.Unuse();
2415 dest->Split(cc);
2416 }
2417 }
2418 }
2419
2420
2421 // Call the function just below TOS on the stack with the given
2422 // arguments. The receiver is the TOS.
2423 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
2424 CallFunctionFlags flags,
2425 int position) {
2426 // Push the arguments ("left-to-right") on the stack.
2427 int arg_count = args->length();
2428 for (int i = 0; i < arg_count; i++) {
2429 Load(args->at(i));
2430 }
2431
2432 // Record the position for debugging purposes.
2433 CodeForSourcePosition(position);
2434
2435 // Use the shared code stub to call the function.
2436 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2437 CallFunctionStub call_function(arg_count, in_loop, flags);
2438 Result answer = frame_->CallStub(&call_function, arg_count + 1);
2439 // Restore context and replace function on the stack with the
2440 // result of the stub invocation.
2441 frame_->RestoreContextRegister();
2442 frame_->SetElementAt(0, &answer);
2443 }
2444
2445
2446 void CodeGenerator::CallApplyLazy(Expression* applicand,
2447 Expression* receiver,
2448 VariableProxy* arguments,
2449 int position) {
2450 // An optimized implementation of expressions of the form
2451 // x.apply(y, arguments).
2452 // If the arguments object of the scope has not been allocated,
2453 // and x.apply is Function.prototype.apply, this optimization
2454 // just copies y and the arguments of the current function on the
2455 // stack, as receiver and arguments, and calls x.
2456 // In the implementation comments, we call x the applicand
2457 // and y the receiver.
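  // An illustrative (hypothetical) source pattern this targets:
  //   function f() { return x.apply(y, arguments); }
  // where no explicit arguments object has been materialized.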
2458 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
2459 ASSERT(arguments->IsArguments());
2460
2461 // Load applicand.apply onto the stack. This will usually
2462 // give us a megamorphic load site. Not super, but it works.
2463 Load(applicand);
2464 frame()->Dup();
2465 Handle<String> name = Factory::LookupAsciiSymbol("apply");
2466 frame()->Push(name);
2467 Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
2468 __ nop();
2469 frame()->Push(&answer);
2470
2471 // Load the receiver and the existing arguments object onto the
2472 // expression stack. Avoid allocating the arguments object here.
2473 Load(receiver);
2474 Result existing_args =
2475 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
2476 frame()->Push(&existing_args);
2477
2478 // Emit the source position information after having loaded the
2479 // receiver and the arguments.
2480 CodeForSourcePosition(position);
2481 // Contents of frame at this point:
2482 // Frame[0]: arguments object of the current function or the hole.
2483 // Frame[1]: receiver
2484 // Frame[2]: applicand.apply
2485 // Frame[3]: applicand.
2486
2487 // Check if the arguments object has been lazily allocated
2488 // already. If so, just use that instead of copying the arguments
2489 // from the stack. This also deals with cases where a local variable
2490 // named 'arguments' has been introduced.
2491 frame_->Dup();
2492 Result probe = frame_->Pop();
2493 { VirtualFrame::SpilledScope spilled_scope;
2494 Label slow, done;
2495 bool try_lazy = true;
2496 if (probe.is_constant()) {
2497 try_lazy = probe.handle()->IsTheHole();
2498 } else {
2499 __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
2500 probe.Unuse();
2501 __ j(not_equal, &slow);
2502 }
2503
2504 if (try_lazy) {
2505 Label build_args;
2506 // Get rid of the arguments object probe.
2507 frame_->Drop(); // Can be called on a spilled frame.
2508 // Stack now has 3 elements on it.
2509 // Contents of stack at this point:
2510 // esp[0]: receiver
2511 // esp[1]: applicand.apply
2512 // esp[2]: applicand.
2513
2514 // Check that the receiver really is a JavaScript object.
2515 __ mov(eax, Operand(esp, 0));
2516 __ test(eax, Immediate(kSmiTagMask));
2517 __ j(zero, &build_args);
2518 // We allow all JSObjects including JSFunctions. As long as
2519 // JS_FUNCTION_TYPE is the last instance type and it is right
2520 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
2521 // bound.
2522 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
2523 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
2524 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
2525 __ j(below, &build_args);
2526
2527 // Check that applicand.apply is Function.prototype.apply.
2528 __ mov(eax, Operand(esp, kPointerSize));
2529 __ test(eax, Immediate(kSmiTagMask));
2530 __ j(zero, &build_args);
2531 __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
2532 __ j(not_equal, &build_args);
2533 __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
2534 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
2535 __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
2536 Immediate(apply_code));
2537 __ j(not_equal, &build_args);
2538
2539 // Check that applicand is a function.
2540 __ mov(edi, Operand(esp, 2 * kPointerSize));
2541 __ test(edi, Immediate(kSmiTagMask));
2542 __ j(zero, &build_args);
2543 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
2544 __ j(not_equal, &build_args);
2545
2546 // Copy the arguments to this function possibly from the
2547 // adaptor frame below it.
2548 Label invoke, adapted;
2549 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2550 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
2551 __ cmp(Operand(ecx),
2552 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2553 __ j(equal, &adapted);
2554
2555 // No arguments adaptor frame. Copy fixed number of arguments.
2556 __ mov(eax, Immediate(scope()->num_parameters()));
2557 for (int i = 0; i < scope()->num_parameters(); i++) {
2558 __ push(frame_->ParameterAt(i));
2559 }
2560 __ jmp(&invoke);
2561
2562 // Arguments adaptor frame present. Copy arguments from there, but
2563 // avoid copying too many arguments to avoid stack overflows.
2564 __ bind(&adapted);
2565 static const uint32_t kArgumentsLimit = 1 * KB;
2566 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2567 __ SmiUntag(eax);
2568 __ mov(ecx, Operand(eax));
2569 __ cmp(eax, kArgumentsLimit);
2570 __ j(above, &build_args);
2571
2572 // Loop through the arguments pushing them onto the execution
2573 // stack. We don't inform the virtual frame of the push, so we don't
2574 // have to worry about getting rid of the elements from the virtual
2575 // frame.
2576 Label loop;
2577 // ecx is a small non-negative integer, due to the test above.
2578 __ test(ecx, Operand(ecx));
2579 __ j(zero, &invoke);
2580 __ bind(&loop);
2581 __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
2582 __ dec(ecx);
2583 __ j(not_zero, &loop);
2584
2585 // Invoke the function.
2586 __ bind(&invoke);
2587 ParameterCount actual(eax);
2588 __ InvokeFunction(edi, actual, CALL_FUNCTION);
2589 // Drop applicand.apply and applicand from the stack, and push
2590 // the result of the function call, but leave the spilled frame
2591 // unchanged, with 3 elements, so it is correct when we compile the
2592 // slow-case code.
2593 __ add(Operand(esp), Immediate(2 * kPointerSize));
2594 __ push(eax);
2595 // Stack now has 1 element:
2596 // esp[0]: result
2597 __ jmp(&done);
2598
2599 // Slow-case: Allocate the arguments object since we know it isn't
2600 // there, and fall-through to the slow-case where we call
2601 // applicand.apply.
2602 __ bind(&build_args);
2603       // Stack now has 3 elements, because we jumped here from a point where:
2604 // esp[0]: receiver
2605 // esp[1]: applicand.apply
2606 // esp[2]: applicand.
2607
2608 // StoreArgumentsObject requires a correct frame, and may modify it.
2609 Result arguments_object = StoreArgumentsObject(false);
2610 frame_->SpillAll();
2611 arguments_object.ToRegister();
2612 frame_->EmitPush(arguments_object.reg());
2613 arguments_object.Unuse();
2614 // Stack and frame now have 4 elements.
2615 __ bind(&slow);
2616 }
2617
2618 // Generic computation of x.apply(y, args) with no special optimization.
2619 // Flip applicand.apply and applicand on the stack, so
2620 // applicand looks like the receiver of the applicand.apply call.
2621 // Then process it as a normal function call.
2622 __ mov(eax, Operand(esp, 3 * kPointerSize));
2623 __ mov(ebx, Operand(esp, 2 * kPointerSize));
2624 __ mov(Operand(esp, 2 * kPointerSize), eax);
2625 __ mov(Operand(esp, 3 * kPointerSize), ebx);
2626
2627 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
2628 Result res = frame_->CallStub(&call_function, 3);
2629 // The function and its two arguments have been dropped.
2630 frame_->Drop(1); // Drop the receiver as well.
2631 res.ToRegister();
2632 frame_->EmitPush(res.reg());
2633 // Stack now has 1 element:
2634 // esp[0]: result
2635 if (try_lazy) __ bind(&done);
2636 } // End of spilled scope.
2637 // Restore the context register after a call.
2638 frame_->RestoreContextRegister();
2639 }
2640
2641
2642 class DeferredStackCheck: public DeferredCode {
2643 public:
2644   DeferredStackCheck() {
2645 set_comment("[ DeferredStackCheck");
2646 }
2647
2648 virtual void Generate();
2649 };
2650
2651
2652 void DeferredStackCheck::Generate() {
2653 StackCheckStub stub;
2654 __ CallStub(&stub);
2655 }
2656
2657
2658 void CodeGenerator::CheckStack() {
2659 DeferredStackCheck* deferred = new DeferredStackCheck;
2660 ExternalReference stack_limit =
2661 ExternalReference::address_of_stack_limit();
2662 __ cmp(esp, Operand::StaticVariable(stack_limit));
2663 deferred->Branch(below);
2664 deferred->BindExit();
2665 }
2666
2667
2668 void CodeGenerator::VisitAndSpill(Statement* statement) {
2669 ASSERT(in_spilled_code());
2670 set_in_spilled_code(false);
2671 Visit(statement);
2672 if (frame_ != NULL) {
2673 frame_->SpillAll();
2674 }
2675 set_in_spilled_code(true);
2676 }
2677
2678
2679 void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
2680 ASSERT(in_spilled_code());
2681 set_in_spilled_code(false);
2682 VisitStatements(statements);
2683 if (frame_ != NULL) {
2684 frame_->SpillAll();
2685 }
2686 set_in_spilled_code(true);
2687 }
2688
2689
2690 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
2691 ASSERT(!in_spilled_code());
2692 for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
2693 Visit(statements->at(i));
2694 }
2695 }
2696
2697
2698 void CodeGenerator::VisitBlock(Block* node) {
2699 ASSERT(!in_spilled_code());
2700 Comment cmnt(masm_, "[ Block");
2701 CodeForStatementPosition(node);
2702 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
2703 VisitStatements(node->statements());
2704 if (node->break_target()->is_linked()) {
2705 node->break_target()->Bind();
2706 }
2707 node->break_target()->Unuse();
2708 }
2709
2710
2711 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
2712 // Call the runtime to declare the globals. The inevitable call
2713 // will sync frame elements to memory anyway, so we do it eagerly to
2714 // allow us to push the arguments directly into place.
2715 frame_->SyncRange(0, frame_->element_count() - 1);
2716
2717 frame_->EmitPush(esi); // The context is the first argument.
2718 frame_->EmitPush(Immediate(pairs));
2719 frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
2720 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
2721 // Return value is ignored.
2722 }
2723
2724
2725 void CodeGenerator::VisitDeclaration(Declaration* node) {
2726 Comment cmnt(masm_, "[ Declaration");
2727 Variable* var = node->proxy()->var();
2728 ASSERT(var != NULL); // must have been resolved
2729 Slot* slot = var->slot();
2730
2731 // If it was not possible to allocate the variable at compile time,
2732 // we need to "declare" it at runtime to make sure it actually
2733 // exists in the local context.
2734 if (slot != NULL && slot->type() == Slot::LOOKUP) {
2735 // Variables with a "LOOKUP" slot were introduced as non-locals
2736 // during variable resolution and must have mode DYNAMIC.
2737 ASSERT(var->is_dynamic());
2738 // For now, just do a runtime call. Sync the virtual frame eagerly
2739 // so we can simply push the arguments into place.
2740 frame_->SyncRange(0, frame_->element_count() - 1);
2741 frame_->EmitPush(esi);
2742 frame_->EmitPush(Immediate(var->name()));
2743 // Declaration nodes are always introduced in one of two modes.
2744 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
2745 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
2746 frame_->EmitPush(Immediate(Smi::FromInt(attr)));
2747 // Push initial value, if any.
2748 // Note: For variables we must not push an initial value (such as
2749 // 'undefined') because we may have a (legal) redeclaration and we
2750 // must not destroy the current value.
2751 if (node->mode() == Variable::CONST) {
2752 frame_->EmitPush(Immediate(Factory::the_hole_value()));
2753 } else if (node->fun() != NULL) {
2754 Load(node->fun());
2755 } else {
2756 frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
2757 }
2758 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
2759 // Ignore the return value (declarations are statements).
2760 return;
2761 }
2762
2763 ASSERT(!var->is_global());
2764
2765 // If we have a function or a constant, we need to initialize the variable.
2766 Expression* val = NULL;
2767 if (node->mode() == Variable::CONST) {
2768 val = new Literal(Factory::the_hole_value());
2769 } else {
2770 val = node->fun(); // NULL if we don't have a function
2771 }
2772
2773 if (val != NULL) {
2774 {
2775 // Set the initial value.
2776 Reference target(this, node->proxy());
2777 Load(val);
2778 target.SetValue(NOT_CONST_INIT);
2779 // The reference is removed from the stack (preserving TOS) when
2780 // it goes out of scope.
2781 }
2782 // Get rid of the assigned value (declarations are statements).
2783 frame_->Drop();
2784 }
2785 }
2786
2787
2788 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
2789 ASSERT(!in_spilled_code());
2790 Comment cmnt(masm_, "[ ExpressionStatement");
2791 CodeForStatementPosition(node);
2792 Expression* expression = node->expression();
2793 expression->MarkAsStatement();
2794 Load(expression);
2795 // Remove the lingering expression result from the top of stack.
2796 frame_->Drop();
2797 }
2798
2799
2800 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
2801 ASSERT(!in_spilled_code());
2802 Comment cmnt(masm_, "// EmptyStatement");
2803 CodeForStatementPosition(node);
2804 // nothing to do
2805 }
2806
2807
2808 void CodeGenerator::VisitIfStatement(IfStatement* node) {
2809 ASSERT(!in_spilled_code());
2810 Comment cmnt(masm_, "[ IfStatement");
2811 // Generate different code depending on which parts of the if statement
2812 // are present or not.
2813 bool has_then_stm = node->HasThenStatement();
2814 bool has_else_stm = node->HasElseStatement();
2815
2816 CodeForStatementPosition(node);
2817 JumpTarget exit;
2818 if (has_then_stm && has_else_stm) {
2819 JumpTarget then;
2820 JumpTarget else_;
2821 ControlDestination dest(&then, &else_, true);
2822 LoadCondition(node->condition(), &dest, true);
2823
2824 if (dest.false_was_fall_through()) {
2825 // The else target was bound, so we compile the else part first.
2826 Visit(node->else_statement());
2827
2828 // We may have dangling jumps to the then part.
2829 if (then.is_linked()) {
2830 if (has_valid_frame()) exit.Jump();
2831 then.Bind();
2832 Visit(node->then_statement());
2833 }
2834 } else {
2835 // The then target was bound, so we compile the then part first.
2836 Visit(node->then_statement());
2837
2838 if (else_.is_linked()) {
2839 if (has_valid_frame()) exit.Jump();
2840 else_.Bind();
2841 Visit(node->else_statement());
2842 }
2843 }
2844
2845 } else if (has_then_stm) {
2846 ASSERT(!has_else_stm);
2847 JumpTarget then;
2848 ControlDestination dest(&then, &exit, true);
2849 LoadCondition(node->condition(), &dest, true);
2850
2851 if (dest.false_was_fall_through()) {
2852 // The exit label was bound. We may have dangling jumps to the
2853 // then part.
2854 if (then.is_linked()) {
2855 exit.Unuse();
2856 exit.Jump();
2857 then.Bind();
2858 Visit(node->then_statement());
2859 }
2860 } else {
2861 // The then label was bound.
2862 Visit(node->then_statement());
2863 }
2864
2865 } else if (has_else_stm) {
2866 ASSERT(!has_then_stm);
2867 JumpTarget else_;
2868 ControlDestination dest(&exit, &else_, false);
2869 LoadCondition(node->condition(), &dest, true);
2870
2871 if (dest.true_was_fall_through()) {
2872 // The exit label was bound. We may have dangling jumps to the
2873 // else part.
2874 if (else_.is_linked()) {
2875 exit.Unuse();
2876 exit.Jump();
2877 else_.Bind();
2878 Visit(node->else_statement());
2879 }
2880 } else {
2881 // The else label was bound.
2882 Visit(node->else_statement());
2883 }
2884
2885 } else {
2886 ASSERT(!has_then_stm && !has_else_stm);
2887 // We only care about the condition's side effects (not its value
2888 // or control flow effect). LoadCondition is called without
2889 // forcing control flow.
2890 ControlDestination dest(&exit, &exit, true);
2891 LoadCondition(node->condition(), &dest, false);
2892 if (!dest.is_used()) {
2893 // We got a value on the frame rather than (or in addition to)
2894 // control flow.
2895 frame_->Drop();
2896 }
2897 }
2898
2899 if (exit.is_linked()) {
2900 exit.Bind();
2901 }
2902 }
2903
2904
2905 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
2906 ASSERT(!in_spilled_code());
2907 Comment cmnt(masm_, "[ ContinueStatement");
2908 CodeForStatementPosition(node);
2909 node->target()->continue_target()->Jump();
2910 }
2911
2912
2913 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
2914 ASSERT(!in_spilled_code());
2915 Comment cmnt(masm_, "[ BreakStatement");
2916 CodeForStatementPosition(node);
2917 node->target()->break_target()->Jump();
2918 }
2919
2920
2921 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
2922 ASSERT(!in_spilled_code());
2923 Comment cmnt(masm_, "[ ReturnStatement");
2924
2925 CodeForStatementPosition(node);
2926 Load(node->expression());
2927 Result return_value = frame_->Pop();
2928 masm()->WriteRecordedPositions();
2929 if (function_return_is_shadowed_) {
2930 function_return_.Jump(&return_value);
2931 } else {
2932 frame_->PrepareForReturn();
2933 if (function_return_.is_bound()) {
2934 // If the function return label is already bound we reuse the
2935 // code by jumping to the return site.
2936 function_return_.Jump(&return_value);
2937 } else {
2938 function_return_.Bind(&return_value);
2939 GenerateReturnSequence(&return_value);
2940 }
2941 }
2942 }
2943
2944
2945 void CodeGenerator::GenerateReturnSequence(Result* return_value) {
2946 // The return value is a live (but not currently reference counted)
2947 // reference to eax. This is safe because the current frame does not
2948 // contain a reference to eax (it is prepared for the return by spilling
2949 // all registers).
2950 if (FLAG_trace) {
2951 frame_->Push(return_value);
2952 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
2953 }
2954 return_value->ToRegister(eax);
2955
2956 // Add a label for checking the size of the code used for returning.
2957 Label check_exit_codesize;
2958 masm_->bind(&check_exit_codesize);
2959
2960 // Leave the frame and return popping the arguments and the
2961 // receiver.
2962 frame_->Exit();
2963 masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
2964 DeleteFrame();
2965
2966 #ifdef ENABLE_DEBUGGER_SUPPORT
2967 // Check that the size of the code used for returning matches what is
2968 // expected by the debugger.
2969 ASSERT_EQ(Assembler::kJSReturnSequenceLength,
2970 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
2971 #endif
2972 }
2973
2974
2975 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
2976 ASSERT(!in_spilled_code());
2977 Comment cmnt(masm_, "[ WithEnterStatement");
2978 CodeForStatementPosition(node);
2979 Load(node->expression());
2980 Result context;
2981 if (node->is_catch_block()) {
2982 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
2983 } else {
2984 context = frame_->CallRuntime(Runtime::kPushContext, 1);
2985 }
2986
2987 // Update context local.
2988 frame_->SaveContextRegister();
2989
2990 // Verify that the runtime call result and esi agree.
2991 if (FLAG_debug_code) {
2992 __ cmp(context.reg(), Operand(esi));
2993 __ Assert(equal, "Runtime::NewContext should end up in esi");
2994 }
2995 }
2996
2997
2998 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
2999 ASSERT(!in_spilled_code());
3000 Comment cmnt(masm_, "[ WithExitStatement");
3001 CodeForStatementPosition(node);
3002 // Pop context.
3003 __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
3004 // Update context local.
3005 frame_->SaveContextRegister();
3006 }
3007
3008
3009 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
3010 ASSERT(!in_spilled_code());
3011 Comment cmnt(masm_, "[ SwitchStatement");
3012 CodeForStatementPosition(node);
3013 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3014
3015 // Compile the switch value.
3016 Load(node->tag());
3017
3018 ZoneList<CaseClause*>* cases = node->cases();
3019 int length = cases->length();
3020 CaseClause* default_clause = NULL;
3021
3022 JumpTarget next_test;
3023 // Compile the case label expressions and comparisons. Exit early
3024 // if a comparison is unconditionally true. The target next_test is
3025 // bound before the loop in order to indicate control flow to the
3026 // first comparison.
3027 next_test.Bind();
3028 for (int i = 0; i < length && !next_test.is_unused(); i++) {
3029 CaseClause* clause = cases->at(i);
3030 // The default is not a test, but remember it for later.
3031 if (clause->is_default()) {
3032 default_clause = clause;
3033 continue;
3034 }
3035
3036 Comment cmnt(masm_, "[ Case comparison");
3037 // We recycle the same target next_test for each test. Bind it if
3038 // the previous test has not done so and then unuse it for the
3039 // loop.
3040 if (next_test.is_linked()) {
3041 next_test.Bind();
3042 }
3043 next_test.Unuse();
3044
3045 // Duplicate the switch value.
3046 frame_->Dup();
3047
3048 // Compile the label expression.
3049 Load(clause->label());
3050
3051 // Compare and branch to the body if true or the next test if
3052 // false. Prefer the next test as a fall through.
3053 ControlDestination dest(clause->body_target(), &next_test, false);
3054 Comparison(node, equal, true, &dest);
3055
3056 // If the comparison fell through to the true target, jump to the
3057 // actual body.
3058 if (dest.true_was_fall_through()) {
3059 clause->body_target()->Unuse();
3060 clause->body_target()->Jump();
3061 }
3062 }
3063
3064 // If there was control flow to a next test from the last one
3065 // compiled, compile a jump to the default or break target.
3066 if (!next_test.is_unused()) {
3067 if (next_test.is_linked()) {
3068 next_test.Bind();
3069 }
3070 // Drop the switch value.
3071 frame_->Drop();
3072 if (default_clause != NULL) {
3073 default_clause->body_target()->Jump();
3074 } else {
3075 node->break_target()->Jump();
3076 }
3077 }
3078
3079
3080 // The last instruction emitted was a jump, either to the default
3081 // clause or the break target, or else to a case body from the loop
3082 // that compiles the tests.
3083 ASSERT(!has_valid_frame());
3084 // Compile case bodies as needed.
3085 for (int i = 0; i < length; i++) {
3086 CaseClause* clause = cases->at(i);
3087
3088 // There are two ways to reach the body: from the corresponding
3089 // test or as the fall through of the previous body.
3090 if (clause->body_target()->is_linked() || has_valid_frame()) {
3091 if (clause->body_target()->is_linked()) {
3092 if (has_valid_frame()) {
3093 // If we have both a jump to the test and a fall through, put
3094 // a jump on the fall through path to avoid the dropping of
3095 // the switch value on the test path. The exception is the
3096 // default which has already had the switch value dropped.
3097 if (clause->is_default()) {
3098 clause->body_target()->Bind();
3099 } else {
3100 JumpTarget body;
3101 body.Jump();
3102 clause->body_target()->Bind();
3103 frame_->Drop();
3104 body.Bind();
3105 }
3106 } else {
3107 // No fall through to worry about.
3108 clause->body_target()->Bind();
3109 if (!clause->is_default()) {
3110 frame_->Drop();
3111 }
3112 }
3113 } else {
3114 // Otherwise, we have only fall through.
3115 ASSERT(has_valid_frame());
3116 }
3117
3118 // We are now prepared to compile the body.
3119 Comment cmnt(masm_, "[ Case body");
3120 VisitStatements(clause->statements());
3121 }
3122 clause->body_target()->Unuse();
3123 }
3124
3125 // We may not have a valid frame here so bind the break target only
3126 // if needed.
3127 if (node->break_target()->is_linked()) {
3128 node->break_target()->Bind();
3129 }
3130 node->break_target()->Unuse();
3131 }
3132
3133
3134 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
3135 ASSERT(!in_spilled_code());
3136 Comment cmnt(masm_, "[ DoWhileStatement");
3137 CodeForStatementPosition(node);
3138 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3139 JumpTarget body(JumpTarget::BIDIRECTIONAL);
3140 IncrementLoopNesting();
3141
3142 ConditionAnalysis info = AnalyzeCondition(node->cond());
3143 // Label the top of the loop for the backward jump if necessary.
3144 switch (info) {
3145 case ALWAYS_TRUE:
3146 // Use the continue target.
3147 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3148 node->continue_target()->Bind();
3149 break;
3150 case ALWAYS_FALSE:
3151 // No need to label it.
3152 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3153 break;
3154 case DONT_KNOW:
3155 // Continue is the test, so use the backward body target.
3156 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3157 body.Bind();
3158 break;
3159 }
3160
3161 CheckStack(); // TODO(1222600): ignore if body contains calls.
3162 Visit(node->body());
3163
3164 // Compile the test.
3165 switch (info) {
3166 case ALWAYS_TRUE:
3167 // If control flow can fall off the end of the body, jump back to
3168 // the top and bind the break target at the exit.
3169 if (has_valid_frame()) {
3170 node->continue_target()->Jump();
3171 }
3172 if (node->break_target()->is_linked()) {
3173 node->break_target()->Bind();
3174 }
3175 break;
3176 case ALWAYS_FALSE:
3177 // We may have had continues or breaks in the body.
3178 if (node->continue_target()->is_linked()) {
3179 node->continue_target()->Bind();
3180 }
3181 if (node->break_target()->is_linked()) {
3182 node->break_target()->Bind();
3183 }
3184 break;
3185 case DONT_KNOW:
3186 // We have to compile the test expression if it can be reached by
3187 // control flow falling out of the body or via continue.
3188 if (node->continue_target()->is_linked()) {
3189 node->continue_target()->Bind();
3190 }
3191 if (has_valid_frame()) {
3192 Comment cmnt(masm_, "[ DoWhileCondition");
3193 CodeForDoWhileConditionPosition(node);
3194 ControlDestination dest(&body, node->break_target(), false);
3195 LoadCondition(node->cond(), &dest, true);
3196 }
3197 if (node->break_target()->is_linked()) {
3198 node->break_target()->Bind();
3199 }
3200 break;
3201 }
3202
3203 DecrementLoopNesting();
3204 }
3205
3206
3207 void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
3208 ASSERT(!in_spilled_code());
3209 Comment cmnt(masm_, "[ WhileStatement");
3210 CodeForStatementPosition(node);
3211
3212 // If the condition is always false and has no side effects, we do not
3213 // need to compile anything.
3214 ConditionAnalysis info = AnalyzeCondition(node->cond());
3215 if (info == ALWAYS_FALSE) return;
3216
3217 // Do not duplicate conditions that may have function literal
3218 // subexpressions. This can cause us to compile the function literal
3219 // twice.
3220 bool test_at_bottom = !node->may_have_function_literal();
3221 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3222 IncrementLoopNesting();
3223 JumpTarget body;
3224 if (test_at_bottom) {
3225 body.set_direction(JumpTarget::BIDIRECTIONAL);
3226 }
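// For a loop like
//   while (x < n) { x++; }
// whose condition contains no function literals, test_at_bottom is
// true: the condition is compiled once here to enter the loop and again
// after the body, so each iteration takes a single conditional branch
// instead of a jump back to a test at the top.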
3227
3228 // Based on the condition analysis, compile the test as necessary.
3229 switch (info) {
3230 case ALWAYS_TRUE:
3231 // We will not compile the test expression. Label the top of the
3232 // loop with the continue target.
3233 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3234 node->continue_target()->Bind();
3235 break;
3236 case DONT_KNOW: {
3237 if (test_at_bottom) {
3238 // Continue is the test at the bottom, no need to label the test
3239 // at the top. The body is a backward target.
3240 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3241 } else {
3242 // Label the test at the top as the continue target. The body
3243 // is a forward-only target.
3244 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3245 node->continue_target()->Bind();
3246 }
3247 // Compile the test with the body as the true target and preferred
3248 // fall-through and with the break target as the false target.
3249 ControlDestination dest(&body, node->break_target(), true);
3250 LoadCondition(node->cond(), &dest, true);
3251
3252 if (dest.false_was_fall_through()) {
3253 // If we got the break target as fall-through, the test may have
3254 // been unconditionally false (if there are no jumps to the
3255 // body).
3256 if (!body.is_linked()) {
3257 DecrementLoopNesting();
3258 return;
3259 }
3260
3261 // Otherwise, jump around the body on the fall through and then
3262 // bind the body target.
3263 node->break_target()->Unuse();
3264 node->break_target()->Jump();
3265 body.Bind();
3266 }
3267 break;
3268 }
3269 case ALWAYS_FALSE:
3270 UNREACHABLE();
3271 break;
3272 }
3273
3274 CheckStack(); // TODO(1222600): ignore if body contains calls.
3275 Visit(node->body());
3276
3277 // Based on the condition analysis, compile the backward jump as
3278 // necessary.
3279 switch (info) {
3280 case ALWAYS_TRUE:
3281 // The loop body has been labeled with the continue target.
3282 if (has_valid_frame()) {
3283 node->continue_target()->Jump();
3284 }
3285 break;
3286 case DONT_KNOW:
3287 if (test_at_bottom) {
3288 // If we have chosen to recompile the test at the bottom, then
3289 // it is the continue target.
3290 if (node->continue_target()->is_linked()) {
3291 node->continue_target()->Bind();
3292 }
3293 if (has_valid_frame()) {
3294 // The break target is the fall-through (body is a backward
3295 // jump from here and thus an invalid fall-through).
3296 ControlDestination dest(&body, node->break_target(), false);
3297 LoadCondition(node->cond(), &dest, true);
3298 }
3299 } else {
3300 // If we have chosen not to recompile the test at the bottom,
3301 // jump back to the one at the top.
3302 if (has_valid_frame()) {
3303 node->continue_target()->Jump();
3304 }
3305 }
3306 break;
3307 case ALWAYS_FALSE:
3308 UNREACHABLE();
3309 break;
3310 }
3311
3312 // The break target may be already bound (by the condition), or there
3313 // may not be a valid frame. Bind it only if needed.
3314 if (node->break_target()->is_linked()) {
3315 node->break_target()->Bind();
3316 }
3317 DecrementLoopNesting();
3318 }
3319
3320
3321 void CodeGenerator::VisitForStatement(ForStatement* node) {
3322 ASSERT(!in_spilled_code());
3323 Comment cmnt(masm_, "[ ForStatement");
3324 CodeForStatementPosition(node);
3325
3326 // Compile the init expression if present.
3327 if (node->init() != NULL) {
3328 Visit(node->init());
3329 }
3330
3331 // If the condition is always false and has no side effects, we do not
3332 // need to compile anything else.
3333 ConditionAnalysis info = AnalyzeCondition(node->cond());
3334 if (info == ALWAYS_FALSE) return;
3335
3336 // Do not duplicate conditions that may have function literal
3337 // subexpressions. This can cause us to compile the function literal
3338 // twice.
3339 bool test_at_bottom = !node->may_have_function_literal();
3340 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3341 IncrementLoopNesting();
3342
3343 // Target for backward edge if no test at the bottom, otherwise
3344 // unused.
3345 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
3346
3347 // Target for backward edge if there is a test at the bottom,
3348 // otherwise used as target for test at the top.
3349 JumpTarget body;
3350 if (test_at_bottom) {
3351 body.set_direction(JumpTarget::BIDIRECTIONAL);
3352 }
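// The general shape for
//   for (init; cond; next) { body }
// is: init (above), an optional test here, the body, the update
// expression, and a backward jump. A 'continue' in the body targets the
// update expression when one is present.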
3353
3354 // Based on the condition analysis, compile the test as necessary.
3355 switch (info) {
3356 case ALWAYS_TRUE:
3357 // We will not compile the test expression. Label the top of the
3358 // loop.
3359 if (node->next() == NULL) {
3360 // Use the continue target if there is no update expression.
3361 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3362 node->continue_target()->Bind();
3363 } else {
3364 // Otherwise use the backward loop target.
3365 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3366 loop.Bind();
3367 }
3368 break;
3369 case DONT_KNOW: {
3370 if (test_at_bottom) {
3371 // Continue is either the update expression or the test at the
3372 // bottom, no need to label the test at the top.
3373 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3374 } else if (node->next() == NULL) {
3375 // We are not recompiling the test at the bottom and there is no
3376 // update expression.
3377 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3378 node->continue_target()->Bind();
3379 } else {
3380 // We are not recompiling the test at the bottom and there is an
3381 // update expression.
3382 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3383 loop.Bind();
3384 }
3385 // Compile the test with the body as the true target and preferred
3386 // fall-through and with the break target as the false target.
3387 ControlDestination dest(&body, node->break_target(), true);
3388 LoadCondition(node->cond(), &dest, true);
3389
3390 if (dest.false_was_fall_through()) {
3391 // If we got the break target as fall-through, the test may have
3392 // been unconditionally false (if there are no jumps to the
3393 // body).
3394 if (!body.is_linked()) {
3395 DecrementLoopNesting();
3396 return;
3397 }
3398
3399 // Otherwise, jump around the body on the fall through and then
3400 // bind the body target.
3401 node->break_target()->Unuse();
3402 node->break_target()->Jump();
3403 body.Bind();
3404 }
3405 break;
3406 }
3407 case ALWAYS_FALSE:
3408 UNREACHABLE();
3409 break;
3410 }
3411
3412 CheckStack(); // TODO(1222600): ignore if body contains calls.
3413 Visit(node->body());
3414
3415 // If there is an update expression, compile it if necessary.
3416 if (node->next() != NULL) {
3417 if (node->continue_target()->is_linked()) {
3418 node->continue_target()->Bind();
3419 }
3420
3421 // Control can reach the update by falling out of the body or by a
3422 // continue.
3423 if (has_valid_frame()) {
3424 // Record the source position of the statement: although this code
3425 // comes after the code for the body, it belongs to the loop
3426 // statement itself and not to the body.
3427 CodeForStatementPosition(node);
3428 Visit(node->next());
3429 }
3430 }
3431
3432 // Based on the condition analysis, compile the backward jump as
3433 // necessary.
3434 switch (info) {
3435 case ALWAYS_TRUE:
3436 if (has_valid_frame()) {
3437 if (node->next() == NULL) {
3438 node->continue_target()->Jump();
3439 } else {
3440 loop.Jump();
3441 }
3442 }
3443 break;
3444 case DONT_KNOW:
3445 if (test_at_bottom) {
3446 if (node->continue_target()->is_linked()) {
3447 // We can have dangling jumps to the continue target if there
3448 // was no update expression.
3449 node->continue_target()->Bind();
3450 }
3451 // Control can reach the test at the bottom by falling out of
3452 // the body, by a continue in the body, or from the update
3453 // expression.
3454 if (has_valid_frame()) {
3455 // The break target is the fall-through (body is a backward
3456 // jump from here).
3457 ControlDestination dest(&body, node->break_target(), false);
3458 LoadCondition(node->cond(), &dest, true);
3459 }
3460 } else {
3461 // Otherwise, jump back to the test at the top.
3462 if (has_valid_frame()) {
3463 if (node->next() == NULL) {
3464 node->continue_target()->Jump();
3465 } else {
3466 loop.Jump();
3467 }
3468 }
3469 }
3470 break;
3471 case ALWAYS_FALSE:
3472 UNREACHABLE();
3473 break;
3474 }
3475
3476 // The break target may be already bound (by the condition), or
3477 // there may not be a valid frame. Bind it only if needed.
3478 if (node->break_target()->is_linked()) {
3479 node->break_target()->Bind();
3480 }
3481 DecrementLoopNesting();
3482 }
3483
3484
3485 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
3486 ASSERT(!in_spilled_code());
3487 VirtualFrame::SpilledScope spilled_scope;
3488 Comment cmnt(masm_, "[ ForInStatement");
3489 CodeForStatementPosition(node);
3490
3491 JumpTarget primitive;
3492 JumpTarget jsobject;
3493 JumpTarget fixed_array;
3494 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
3495 JumpTarget end_del_check;
3496 JumpTarget exit;
3497
3498 // Get the object to enumerate over (converted to JSObject).
3499 LoadAndSpill(node->enumerable());
3500
3501 // Both SpiderMonkey and kjs ignore null and undefined in contrast
3502 // to the specification. 12.6.4 mandates a call to ToObject.
3503 frame_->EmitPop(eax);
3504
3505 // eax: value to be iterated over
3506 __ cmp(eax, Factory::undefined_value());
3507 exit.Branch(equal);
3508 __ cmp(eax, Factory::null_value());
3509 exit.Branch(equal);
3510
3511 // Stack layout in body:
3512 // [iteration counter (smi)] <- slot 0
3513 // [length of array] <- slot 1
3514 // [FixedArray] <- slot 2
3515 // [Map or 0] <- slot 3
3516 // [Object] <- slot 4
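// For example, while compiling
//   for (var p in o) { use(p); }
// these five slots hold the enumeration state for o across the body.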
3517
3518 // Check if enumerable is already a JSObject
3519 // eax: value to be iterated over
3520 __ test(eax, Immediate(kSmiTagMask));
3521 primitive.Branch(zero);
3522 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
3523 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
3524 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
3525 jsobject.Branch(above_equal);
3526
3527 primitive.Bind();
3528 frame_->EmitPush(eax);
3529 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
3530 // The builtin call returns its result in eax, which is where the code below expects it.
3531
3532 jsobject.Bind();
3533 // Get the set of properties (as a FixedArray or Map).
3534 // eax: value to be iterated over
3535 frame_->EmitPush(eax); // Push the object being iterated over.
3536
3537 // Check cache validity in generated code. This is a fast case for
3538 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
3539 // guarantee cache validity, call the runtime system to check cache
3540 // validity or get the property names in a fixed array.
3541 JumpTarget call_runtime;
3542 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
3543 JumpTarget check_prototype;
3544 JumpTarget use_cache;
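// Walk the prototype chain in ecx. Every object on the chain must have
// no elements and a valid enum cache, and for every object but the
// receiver the cache must be empty; any failed check falls back to the
// runtime call below.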
3545 __ mov(ecx, eax);
3546 loop.Bind();
3547 // Check that there are no elements.
3548 __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
3549 __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
3550 call_runtime.Branch(not_equal);
3551 // Check that instance descriptors are not empty so that we can
3552 // check for an enum cache. Leave the map in ebx for the subsequent
3553 // prototype load.
3554 __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3555 __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
3556 __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array()));
3557 call_runtime.Branch(equal);
3558 // Check that there is an enum cache in the non-empty instance
3559 // descriptors. This is the case if the next enumeration index
3560 // field does not contain a smi.
3561 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
3562 __ test(edx, Immediate(kSmiTagMask));
3563 call_runtime.Branch(zero);
3564 // For all objects but the receiver, check that the cache is empty.
3565 __ cmp(ecx, Operand(eax));
3566 check_prototype.Branch(equal);
3567 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
3568 __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
3569 call_runtime.Branch(not_equal);
3570 check_prototype.Bind();
3571 // Load the prototype from the map and loop if non-null.
3572 __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
3573 __ cmp(Operand(ecx), Immediate(Factory::null_value()));
3574 loop.Branch(not_equal);
3575 // The enum cache is valid. Load the map of the object being
3576 // iterated over and use the cache for the iteration.
3577 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
3578 use_cache.Jump();
3579
3580 call_runtime.Bind();
3581 // Call the runtime to get the property names for the object.
3582 frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
3583 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
3584
3585 // If we got a map from the runtime call, we can do a fast
3586 // modification check. Otherwise, we got a fixed array, and we have
3587 // to do a slow check.
3588 // eax: map or fixed array (result from call to
3589 // Runtime::kGetPropertyNamesFast)
3590 __ mov(edx, Operand(eax));
3591 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
3592 __ cmp(ecx, Factory::meta_map());
3593 fixed_array.Branch(not_equal);
3594
3595 use_cache.Bind();
3596 // Get enum cache
3597 // eax: map (either the result from a call to
3598 // Runtime::kGetPropertyNamesFast or has been fetched directly from
3599 // the object)
3600 __ mov(ecx, Operand(eax));
3601
3602 __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
3603 // Get the bridge array held in the enumeration index field.
3604 __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
3605 // Get the cache from the bridge array.
3606 __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
3607
3608 frame_->EmitPush(eax); // <- slot 3
3609 frame_->EmitPush(edx); // <- slot 2
3610 __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
3611 __ SmiTag(eax);
3612 frame_->EmitPush(eax); // <- slot 1
3613 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
3614 entry.Jump();
3615
3616 fixed_array.Bind();
3617 // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
3618 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
3619 frame_->EmitPush(eax); // <- slot 2
3620
3621 // Push the length of the array and the initial index onto the stack.
3622 __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
3623 __ SmiTag(eax);
3624 frame_->EmitPush(eax); // <- slot 1
3625 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
3626
3627 // Condition.
3628 entry.Bind();
3629 // Grab the current frame's height for the break and continue
3630 // targets only after all the state is pushed on the frame.
3631 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3632 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3633
3634 __ mov(eax, frame_->ElementAt(0)); // load the current count
3635 __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
3636 node->break_target()->Branch(above_equal);
3637
3638 // Get the i'th entry of the array.
3639 __ mov(edx, frame_->ElementAt(2));
3640 __ mov(ebx, Operand(edx, eax, times_2,
3641 FixedArray::kHeaderSize - kHeapObjectTag));
3642
3643 // Get the expected map from the stack, or a zero map in the
3644 // permanent slow case.
3645 // eax: current iteration count; ebx: i'th entry of the enum cache
3646 __ mov(edx, frame_->ElementAt(3));
3647 // Check if the expected map still matches that of the enumerable.
3648 // If not, we have to filter the key.
3649 // eax: current iteration count
3650 // ebx: i'th entry of the enum cache
3651 // edx: expected map value
3652 __ mov(ecx, frame_->ElementAt(4));
3653 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
3654 __ cmp(ecx, Operand(edx));
3655 end_del_check.Branch(equal);
3656
3657 // Convert the entry to a string (or null if it isn't a property anymore).
3658 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
3659 frame_->EmitPush(ebx); // push entry
3660 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
3661 __ mov(ebx, Operand(eax));
3662
3663 // If the property has been removed while iterating, we just skip it.
3664 __ cmp(ebx, Factory::null_value());
3665 node->continue_target()->Branch(equal);
3666
3667 end_del_check.Bind();
3668 // Store the entry in the 'each' expression and take another spin in the
3669 // loop. ebx: i'th entry of the enum cache (or string thereof)
3670 frame_->EmitPush(ebx);
3671 { Reference each(this, node->each());
3672 // Loading a reference may leave the frame in an unspilled state.
3673 frame_->SpillAll();
3674 if (!each.is_illegal()) {
3675 if (each.size() > 0) {
3676 frame_->EmitPush(frame_->ElementAt(each.size()));
3677 each.SetValue(NOT_CONST_INIT);
3678 frame_->Drop(2);
3679 } else {
3680 // If the reference was to a slot we rely on the convenient property
3681 // that it doesn't matter whether a value (eg, ebx pushed above) is
3682 // right on top of or right underneath a zero-sized reference.
3683 each.SetValue(NOT_CONST_INIT);
3684 frame_->Drop();
3685 }
3686 }
3687 }
3688 // Unloading a reference may leave the frame in an unspilled state.
3689 frame_->SpillAll();
3690
3691 // Body.
3692 CheckStack(); // TODO(1222600): ignore if body contains calls.
3693 VisitAndSpill(node->body());
3694
3695 // Next. Reestablish a spilled frame in case we are coming here via
3696 // a continue in the body.
3697 node->continue_target()->Bind();
3698 frame_->SpillAll();
3699 frame_->EmitPop(eax);
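// Both the counter and the constant are smis, so ordinary addition
// produces the incremented smi without any tag adjustment.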
3700 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
3701 frame_->EmitPush(eax);
3702 entry.Jump();
3703
3704 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
3705 // any frame.
3706 node->break_target()->Bind();
3707 frame_->Drop(5);
3708
3709 // Exit.
3710 exit.Bind();
3711
3712 node->continue_target()->Unuse();
3713 node->break_target()->Unuse();
3714 }
3715
3716
3717 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
3718 ASSERT(!in_spilled_code());
3719 VirtualFrame::SpilledScope spilled_scope;
3720 Comment cmnt(masm_, "[ TryCatchStatement");
3721 CodeForStatementPosition(node);
3722
3723 JumpTarget try_block;
3724 JumpTarget exit;
3725
3726 try_block.Call();
3727 // --- Catch block ---
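// Control returns here, with the thrown value in eax, when an exception
// unwinds to the handler pushed in the try block below.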
3728 frame_->EmitPush(eax);
3729
3730 // Store the caught exception in the catch variable.
3731 Variable* catch_var = node->catch_var()->var();
3732 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
3733 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
3734
3735 // Remove the exception from the stack.
3736 frame_->Drop();
3737
3738 VisitStatementsAndSpill(node->catch_block()->statements());
3739 if (has_valid_frame()) {
3740 exit.Jump();
3741 }
3742
3743
3744 // --- Try block ---
3745 try_block.Bind();
3746
3747 frame_->PushTryHandler(TRY_CATCH_HANDLER);
3748 int handler_height = frame_->height();
3749
3750 // Shadow the jump targets for all escapes from the try block, including
3751 // returns. During shadowing, the original target is hidden as the
3752 // ShadowTarget and operations on the original actually affect the
3753 // shadowing target.
3754 //
3755 // We should probably try to unify the escaping targets and the return
3756 // target.
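// For example, a 'return' inside the try block cannot simply return:
// the try handler must be unlinked first. The shadow target collects
// such jumps so that the unlink code below is emitted once per escaping
// target.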
3757 int nof_escapes = node->escaping_targets()->length();
3758 List<ShadowTarget*> shadows(1 + nof_escapes);
3759
3760 // Add the shadow target for the function return.
3761 static const int kReturnShadowIndex = 0;
3762 shadows.Add(new ShadowTarget(&function_return_));
3763 bool function_return_was_shadowed = function_return_is_shadowed_;
3764 function_return_is_shadowed_ = true;
3765 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
3766
3767 // Add the remaining shadow targets.
3768 for (int i = 0; i < nof_escapes; i++) {
3769 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
3770 }
3771
3772 // Generate code for the statements in the try block.
3773 VisitStatementsAndSpill(node->try_block()->statements());
3774
3775 // Stop the introduced shadowing and count the number of required unlinks.
3776 // After shadowing stops, the original targets are unshadowed and the
3777 // ShadowTargets represent the formerly shadowing targets.
3778 bool has_unlinks = false;
3779 for (int i = 0; i < shadows.length(); i++) {
3780 shadows[i]->StopShadowing();
3781 has_unlinks = has_unlinks || shadows[i]->is_linked();
3782 }
3783 function_return_is_shadowed_ = function_return_was_shadowed;
3784
3785 // Get an external reference to the handler address.
3786 ExternalReference handler_address(Top::k_handler_address);
3787
3788 // Make sure that there's nothing left on the stack above the
3789 // handler structure.
3790 if (FLAG_debug_code) {
3791 __ mov(eax, Operand::StaticVariable(handler_address));
3792 __ cmp(esp, Operand(eax));
3793 __ Assert(equal, "stack pointer should point to top handler");
3794 }
3795
3796 // If we can fall off the end of the try block, unlink from try chain.
3797 if (has_valid_frame()) {
3798 // The next handler address is on top of the frame. Unlink from
3799 // the handler list and drop the rest of this handler from the
3800 // frame.
3801 ASSERT(StackHandlerConstants::kNextOffset == 0);
3802 frame_->EmitPop(Operand::StaticVariable(handler_address));
3803 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3804 if (has_unlinks) {
3805 exit.Jump();
3806 }
3807 }
3808
3809 // Generate unlink code for the (formerly) shadowing targets that
3810 // have been jumped to. Deallocate each shadow target.
3811 Result return_value;
3812 for (int i = 0; i < shadows.length(); i++) {
3813 if (shadows[i]->is_linked()) {
3814 // Unlink from try chain; be careful not to destroy the TOS if
3815 // there is one.
3816 if (i == kReturnShadowIndex) {
3817 shadows[i]->Bind(&return_value);
3818 return_value.ToRegister(eax);
3819 } else {
3820 shadows[i]->Bind();
3821 }
3822 // Because we can be jumping here (to spilled code) from
3823 // unspilled code, we need to reestablish a spilled frame at
3824 // this block.
3825 frame_->SpillAll();
3826
3827 // Reload sp from the top handler, because some statements that we
3828 // break from (eg, for...in) may have left stuff on the stack.
3829 __ mov(esp, Operand::StaticVariable(handler_address));
3830 frame_->Forget(frame_->height() - handler_height);
3831
3832 ASSERT(StackHandlerConstants::kNextOffset == 0);
3833 frame_->EmitPop(Operand::StaticVariable(handler_address));
3834 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3835
3836 if (i == kReturnShadowIndex) {
3837 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
3838 shadows[i]->other_target()->Jump(&return_value);
3839 } else {
3840 shadows[i]->other_target()->Jump();
3841 }
3842 }
3843 }
3844
3845 exit.Bind();
3846 }
3847
3848
3849 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
3850 ASSERT(!in_spilled_code());
3851 VirtualFrame::SpilledScope spilled_scope;
3852 Comment cmnt(masm_, "[ TryFinallyStatement");
3853 CodeForStatementPosition(node);
3854
3855 // State: used to keep track of the reason for entering the finally
3856 // block. Should probably be extended to hold information for
3857 // break/continue from within the try block.
3858 enum { FALLING, THROWING, JUMPING };
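// The state travels in ecx together with a (possibly faked) value on
// top of the stack. After the finally block runs, both are popped and
// the state is dispatched on: JUMPING + i resumes the i'th shadowed
// target, THROWING rethrows the value, and FALLING falls through.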
3859
3860 JumpTarget try_block;
3861 JumpTarget finally_block;
3862
3863 try_block.Call();
3864
3865 frame_->EmitPush(eax);
3866 // In case of thrown exceptions, this is where we continue.
3867 __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
3868 finally_block.Jump();
3869
3870 // --- Try block ---
3871 try_block.Bind();
3872
3873 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
3874 int handler_height = frame_->height();
3875
3876 // Shadow the jump targets for all escapes from the try block, including
3877 // returns. During shadowing, the original target is hidden as the
3878 // ShadowTarget and operations on the original actually affect the
3879 // shadowing target.
3880 //
3881 // We should probably try to unify the escaping targets and the return
3882 // target.
3883 int nof_escapes = node->escaping_targets()->length();
3884 List<ShadowTarget*> shadows(1 + nof_escapes);
3885
3886 // Add the shadow target for the function return.
3887 static const int kReturnShadowIndex = 0;
3888 shadows.Add(new ShadowTarget(&function_return_));
3889 bool function_return_was_shadowed = function_return_is_shadowed_;
3890 function_return_is_shadowed_ = true;
3891 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
3892
3893 // Add the remaining shadow targets.
3894 for (int i = 0; i < nof_escapes; i++) {
3895 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
3896 }
3897
3898 // Generate code for the statements in the try block.
3899 VisitStatementsAndSpill(node->try_block()->statements());
3900
3901 // Stop the introduced shadowing and count the number of required unlinks.
3902 // After shadowing stops, the original targets are unshadowed and the
3903 // ShadowTargets represent the formerly shadowing targets.
3904 int nof_unlinks = 0;
3905 for (int i = 0; i < shadows.length(); i++) {
3906 shadows[i]->StopShadowing();
3907 if (shadows[i]->is_linked()) nof_unlinks++;
3908 }
3909 function_return_is_shadowed_ = function_return_was_shadowed;
3910
3911 // Get an external reference to the handler address.
3912 ExternalReference handler_address(Top::k_handler_address);
3913
3914 // If we can fall off the end of the try block, unlink from the try
3915 // chain and set the state on the frame to FALLING.
3916 if (has_valid_frame()) {
3917 // The next handler address is on top of the frame.
3918 ASSERT(StackHandlerConstants::kNextOffset == 0);
3919 frame_->EmitPop(Operand::StaticVariable(handler_address));
3920 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3921
3922 // Fake a top of stack value (unneeded when FALLING) and set the
3923 // state in ecx, then jump around the unlink blocks if any.
3924 frame_->EmitPush(Immediate(Factory::undefined_value()));
3925 __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
3926 if (nof_unlinks > 0) {
3927 finally_block.Jump();
3928 }
3929 }
3930
3931 // Generate code to unlink and set the state for the (formerly)
3932 // shadowing targets that have been jumped to.
3933 for (int i = 0; i < shadows.length(); i++) {
3934 if (shadows[i]->is_linked()) {
3935 // If we have come from the shadowed return, the return value is
3936 // on the virtual frame. We must preserve it until it is
3937 // pushed.
3938 if (i == kReturnShadowIndex) {
3939 Result return_value;
3940 shadows[i]->Bind(&return_value);
3941 return_value.ToRegister(eax);
3942 } else {
3943 shadows[i]->Bind();
3944 }
3945 // Because we can be jumping here (to spilled code) from
3946 // unspilled code, we need to reestablish a spilled frame at
3947 // this block.
3948 frame_->SpillAll();
3949
3950 // Reload sp from the top handler, because some statements that
3951 // we break from (eg, for...in) may have left stuff on the
3952 // stack.
3953 __ mov(esp, Operand::StaticVariable(handler_address));
3954 frame_->Forget(frame_->height() - handler_height);
3955
3956 // Unlink this handler and drop it from the frame.
3957 ASSERT(StackHandlerConstants::kNextOffset == 0);
3958 frame_->EmitPop(Operand::StaticVariable(handler_address));
3959 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3960
3961 if (i == kReturnShadowIndex) {
3962 // If this target shadowed the function return, materialize
3963 // the return value on the stack.
3964 frame_->EmitPush(eax);
3965 } else {
3966 // Fake TOS for targets that shadowed breaks and continues.
3967 frame_->EmitPush(Immediate(Factory::undefined_value()));
3968 }
3969 __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
3970 if (--nof_unlinks > 0) {
3971 // If this is not the last unlink block, jump around the next.
3972 finally_block.Jump();
3973 }
3974 }
3975 }
3976
3977 // --- Finally block ---
3978 finally_block.Bind();
3979
3980 // Push the state on the stack.
3981 frame_->EmitPush(ecx);
3982
3983 // We keep two elements on the stack - the (possibly faked) result
3984 // and the state - while evaluating the finally block.
3985 //
3986 // Generate code for the statements in the finally block.
3987 VisitStatementsAndSpill(node->finally_block()->statements());
3988
3989 if (has_valid_frame()) {
3990 // Restore state and return value or faked TOS.
3991 frame_->EmitPop(ecx);
3992 frame_->EmitPop(eax);
3993 }
3994
3995 // Generate code to jump to the right destination for all used
3996 // formerly shadowing targets. Deallocate each shadow target.
3997 for (int i = 0; i < shadows.length(); i++) {
3998 if (has_valid_frame() && shadows[i]->is_bound()) {
3999 BreakTarget* original = shadows[i]->other_target();
4000 __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
4001 if (i == kReturnShadowIndex) {
4002 // The return value is (already) in eax.
4003 Result return_value = allocator_->Allocate(eax);
4004 ASSERT(return_value.is_valid());
4005 if (function_return_is_shadowed_) {
4006 original->Branch(equal, &return_value);
4007 } else {
4008 // Branch around the preparation for return which may emit
4009 // code.
4010 JumpTarget skip;
4011 skip.Branch(not_equal);
4012 frame_->PrepareForReturn();
4013 original->Jump(&return_value);
4014 skip.Bind();
4015 }
4016 } else {
4017 original->Branch(equal);
4018 }
4019 }
4020 }
4021
4022 if (has_valid_frame()) {
4023 // Check if we need to rethrow the exception.
4024 JumpTarget exit;
4025 __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
4026 exit.Branch(not_equal);
4027
4028 // Rethrow exception.
4029 frame_->EmitPush(eax); // undo pop from above
4030 frame_->CallRuntime(Runtime::kReThrow, 1);
4031
4032 // Done.
4033 exit.Bind();
4034 }
4035 }
4036
4037
4038 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
4039 ASSERT(!in_spilled_code());
4040 Comment cmnt(masm_, "[ DebuggerStatement");
4041 CodeForStatementPosition(node);
4042 #ifdef ENABLE_DEBUGGER_SUPPORT
4043 // Spill everything, even constants, to the frame.
4044 frame_->SpillAll();
4045
4046 frame_->DebugBreak();
4047 // Ignore the return value.
4048 #endif
4049 }
4050
4051
4052 Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
4053 ASSERT(boilerplate->IsBoilerplate());
4054
4055 // The inevitable call will sync frame elements to memory anyway, so
4056 // we do it eagerly to allow us to push the arguments directly into
4057 // place.
4058 frame()->SyncRange(0, frame()->element_count() - 1);
4059
4060 // Use the fast case closure allocation code that allocates in new
4061 // space for nested functions that don't need literals cloning.
4062 if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
4063 FastNewClosureStub stub;
4064 frame()->EmitPush(Immediate(boilerplate));
4065 return frame()->CallStub(&stub, 1);
4066 } else {
4067 // Call the runtime to instantiate the function boilerplate
4068 // object.
4069 frame()->EmitPush(esi);
4070 frame()->EmitPush(Immediate(boilerplate));
4071 return frame()->CallRuntime(Runtime::kNewClosure, 2);
4072 }
4073 }
4074
4075
4076 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
4077 Comment cmnt(masm_, "[ FunctionLiteral");
4078
4079 // Build the function boilerplate and instantiate it.
4080 Handle<JSFunction> boilerplate =
4081 Compiler::BuildBoilerplate(node, script(), this);
4082 // Check for stack-overflow exception.
4083 if (HasStackOverflow()) return;
4084 Result result = InstantiateBoilerplate(boilerplate);
4085 frame()->Push(&result);
4086 }
4087
4088
4089 void CodeGenerator::VisitFunctionBoilerplateLiteral(
4090 FunctionBoilerplateLiteral* node) {
4091 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
4092 Result result = InstantiateBoilerplate(node->boilerplate());
4093 frame()->Push(&result);
4094 }
4095
4096
4097 void CodeGenerator::VisitConditional(Conditional* node) {
4098 Comment cmnt(masm_, "[ Conditional");
4099 JumpTarget then;
4100 JumpTarget else_;
4101 JumpTarget exit;
4102 ControlDestination dest(&then, &else_, true);
4103 LoadCondition(node->condition(), &dest, true);
4104
4105 if (dest.false_was_fall_through()) {
4106 // The else target was bound, so we compile the else part first.
4107 Load(node->else_expression());
4108
4109 if (then.is_linked()) {
4110 exit.Jump();
4111 then.Bind();
4112 Load(node->then_expression());
4113 }
4114 } else {
4115 // The then target was bound, so we compile the then part first.
4116 Load(node->then_expression());
4117
4118 if (else_.is_linked()) {
4119 exit.Jump();
4120 else_.Bind();
4121 Load(node->else_expression());
4122 }
4123 }
4124
4125 exit.Bind();
4126 }
4127
4128
4129 Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4130 Result result;
4131 if (slot->type() == Slot::LOOKUP) {
4132 ASSERT(slot->var()->is_dynamic());
4133 JumpTarget slow;
4134 JumpTarget done;
4135
4136 // Generate fast-case code for variables that might be shadowed by
4137 // eval-introduced variables. Eval is used a lot without
4138 // introducing variables. In those cases, we do not want to
4139 // perform a runtime call for all variables in the scope
4140 // containing the eval.
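// For example, in
//   function f() { eval(s); return x; }
// 'x' may be shadowed by an eval-introduced binding, so it is a dynamic
// slot; the code below guards the fast path with run-time checks and
// falls back to a runtime lookup through 'slow'.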
4141 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4142 result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4143 // If there was no control flow to slow, we can exit early.
4144 if (!slow.is_linked()) return result;
4145 done.Jump(&result);
4146
4147 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4148 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4149 // Only generate the fast case for locals that rewrite to slots.
4150 // This rules out argument loads.
4151 if (potential_slot != NULL) {
4152 // Allocate a fresh register to use as a temp in
4153 // ContextSlotOperandCheckExtensions and to hold the result
4154 // value.
4155 result = allocator()->Allocate();
4156 ASSERT(result.is_valid());
4157 __ mov(result.reg(),
4158 ContextSlotOperandCheckExtensions(potential_slot,
4159 result,
4160 &slow));
4161 if (potential_slot->var()->mode() == Variable::CONST) {
4162 __ cmp(result.reg(), Factory::the_hole_value());
4163 done.Branch(not_equal, &result);
4164 __ mov(result.reg(), Factory::undefined_value());
4165 }
4166 // There is always control flow to slow from
4167 // ContextSlotOperandCheckExtensions so we have to jump around
4168 // it.
4169 done.Jump(&result);
4170 }
4171 }
4172
4173 slow.Bind();
4174 // A runtime call is inevitable. We eagerly sync frame elements
4175 // to memory so that we can push the arguments directly into place
4176 // on top of the frame.
4177 frame()->SyncRange(0, frame()->element_count() - 1);
4178 frame()->EmitPush(esi);
4179 frame()->EmitPush(Immediate(slot->var()->name()));
4180 if (typeof_state == INSIDE_TYPEOF) {
4181 result =
4182 frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4183 } else {
4184 result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
4185 }
4186
4187 done.Bind(&result);
4188 return result;
4189
4190 } else if (slot->var()->mode() == Variable::CONST) {
4191 // Const slots may contain 'the hole' value (the constant hasn't been
4192 // initialized yet) which needs to be converted into the 'undefined'
4193 // value.
4194 //
4195 // We currently spill the virtual frame because constants use the
4196 // potentially unsafe direct-frame access of SlotOperand.
4197 VirtualFrame::SpilledScope spilled_scope;
4198 Comment cmnt(masm_, "[ Load const");
4199 Label exit;
4200 __ mov(ecx, SlotOperand(slot, ecx));
4201 __ cmp(ecx, Factory::the_hole_value());
4202 __ j(not_equal, &exit);
4203 __ mov(ecx, Factory::undefined_value());
4204 __ bind(&exit);
4205 return Result(ecx);
4206
4207 } else if (slot->type() == Slot::PARAMETER) {
4208 frame()->PushParameterAt(slot->index());
4209 return frame()->Pop();
4210
4211 } else if (slot->type() == Slot::LOCAL) {
4212 frame()->PushLocalAt(slot->index());
4213 return frame()->Pop();
4214
4215 } else {
4216 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4217 // here.
4218 //
4219 // The use of SlotOperand below is safe for an unspilled frame
4220 // because it will always be a context slot.
4221 ASSERT(slot->type() == Slot::CONTEXT);
4222 result = allocator()->Allocate();
4223 ASSERT(result.is_valid());
4224 __ mov(result.reg(), SlotOperand(slot, result.reg()));
4225 return result;
4226 }
4227 }
4228
4229
4230 Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4231 TypeofState state) {
4232 Result result = LoadFromSlot(slot, state);
4233
4234 // Bail out quickly if we're not using lazy arguments allocation.
4235 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
4236
4237 // ... or if the slot isn't a non-parameter arguments slot.
4238 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
4239
4240 // If the loaded value is a constant, we know whether the arguments
4241 // object has already been lazily allocated.
4242 if (result.is_constant()) {
4243 if (result.handle()->IsTheHole()) {
4244 result.Unuse();
4245 return StoreArgumentsObject(false);
4246 } else {
4247 return result;
4248 }
4249 }
4250
4251 // The loaded value is in a register. If it is the sentinel that
4252 // indicates that we haven't loaded the arguments object yet, we
4253 // need to do it now.
4254 JumpTarget exit;
4255 __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
4256 exit.Branch(not_equal, &result);
4257
4258 result.Unuse();
4259 result = StoreArgumentsObject(false);
4260 exit.Bind(&result);
4261 return result;
4262 }
4263
4264
4265 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4266 Slot* slot,
4267 TypeofState typeof_state,
4268 JumpTarget* slow) {
4269 // Check that no extension objects have been created by calls to
4270 // eval from the current scope to the global scope.
4271 Register context = esi;
4272 Result tmp = allocator_->Allocate();
4273 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
4274
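// Walk the static scope chain. For every scope that has heap slots and
// calls eval, verify at run time that no context extension object has
// been installed, branching to 'slow' if one is found.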
4275 Scope* s = scope();
4276 while (s != NULL) {
4277 if (s->num_heap_slots() > 0) {
4278 if (s->calls_eval()) {
4279 // Check that extension is NULL.
4280 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
4281 Immediate(0));
4282 slow->Branch(not_equal, not_taken);
4283 }
4284 // Load next context in chain.
4285 __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4286 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4287 context = tmp.reg();
4288 }
4289 // If no outer scope calls eval, we do not need to check more
4290 // context extensions. If we have reached an eval scope, we check
4291 // all extensions from this point.
4292 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4293 s = s->outer_scope();
4294 }
4295
4296 if (s != NULL && s->is_eval_scope()) {
4297 // Loop up the context chain. There is no frame effect so it is
4298 // safe to use raw labels here.
4299 Label next, fast;
4300 if (!context.is(tmp.reg())) {
4301 __ mov(tmp.reg(), context);
4302 }
4303 __ bind(&next);
4304 // Terminate at global context.
4305 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
4306 Immediate(Factory::global_context_map()));
4307 __ j(equal, &fast);
4308 // Check that extension is NULL.
4309 __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4310 slow->Branch(not_equal, not_taken);
4311 // Load next context in chain.
4312 __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4313 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4314 __ jmp(&next);
4315 __ bind(&fast);
4316 }
4317 tmp.Unuse();
4318
4319 // All extension objects were empty and it is safe to use a global
4320 // load IC call.
4321 // The register allocator prefers eax if it is free, so the code generator
4322 // will load the global object directly into eax, which is where the LoadIC
4323 // expects it.
4324 frame_->Spill(eax);
4325 LoadGlobal();
4326 frame_->Push(slot->var()->name());
4327 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4328 ? RelocInfo::CODE_TARGET
4329 : RelocInfo::CODE_TARGET_CONTEXT;
4330 Result answer = frame_->CallLoadIC(mode);
4331 // A test eax instruction following the call signals that the inobject
4332 // property case was inlined. Ensure that there is not a test eax
4333 // instruction here.
4334 __ nop();
4335 return answer;
4336 }
4337
4338
4339 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4340 if (slot->type() == Slot::LOOKUP) {
4341 ASSERT(slot->var()->is_dynamic());
4342
4343 // For now, just do a runtime call. Since the call is inevitable,
4344 // we eagerly sync the virtual frame so we can directly push the
4345 // arguments into place.
4346 frame_->SyncRange(0, frame_->element_count() - 1);
4347
4348 frame_->EmitPush(esi);
4349 frame_->EmitPush(Immediate(slot->var()->name()));
4350
4351 Result value;
4352 if (init_state == CONST_INIT) {
4353 // Same as the case for a normal store, but ignores attribute
4354 // (e.g. READ_ONLY) of context slot so that we can initialize const
4355 // properties (introduced via eval("const foo = (some expr);")). Also,
4356 // uses the current function context instead of the top context.
4357 //
4358 // Note that we must declare the foo upon entry of eval(), via a
4359 // context slot declaration, but we cannot initialize it at the same
4360 // time, because the const declaration may be at the end of the eval
4361 // code (sigh...) and the const variable may have been used before
4362 // (where its value is 'undefined'). Thus, we can only do the
4363 // initialization when we actually encounter the expression and when
4364 // the expression operands are defined and valid, and thus we need the
4365 // split into 2 operations: declaration of the context slot followed
4366 // by initialization.
4367 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4368 } else {
4369 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
4370 }
4371 // Storing a variable must keep the (new) value on the expression
4372 // stack. This is necessary for compiling chained assignment
4373 // expressions.
4374 frame_->Push(&value);
4375
4376 } else {
4377 ASSERT(!slot->var()->is_dynamic());
4378
4379 JumpTarget exit;
4380 if (init_state == CONST_INIT) {
4381 ASSERT(slot->var()->mode() == Variable::CONST);
4382 // Only the first const initialization must be executed (the slot
4383 // still contains 'the hole' value). When the assignment is executed,
4384 // the code is identical to a normal store (see below).
4385 //
4386 // We spill the frame in the code below because the direct-frame
4387 // access of SlotOperand is potentially unsafe with an unspilled
4388 // frame.
4389 VirtualFrame::SpilledScope spilled_scope;
4390 Comment cmnt(masm_, "[ Init const");
4391 __ mov(ecx, SlotOperand(slot, ecx));
4392 __ cmp(ecx, Factory::the_hole_value());
4393 exit.Branch(not_equal);
4394 }
4395
4396 // We must execute the store. Storing a variable must keep the (new)
4397 // value on the stack. This is necessary for compiling assignment
4398 // expressions.
4399 //
4400 // Note: We will reach here even with slot->var()->mode() ==
4401 // Variable::CONST because of const declarations which will initialize
4402 // consts to 'the hole' value and by doing so, end up calling this code.
4403 if (slot->type() == Slot::PARAMETER) {
4404 frame_->StoreToParameterAt(slot->index());
4405 } else if (slot->type() == Slot::LOCAL) {
4406 frame_->StoreToLocalAt(slot->index());
4407 } else {
4408 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4409 //
4410 // The use of SlotOperand below is safe for an unspilled frame
4411 // because the slot is a context slot.
4412 ASSERT(slot->type() == Slot::CONTEXT);
4413 frame_->Dup();
4414 Result value = frame_->Pop();
4415 value.ToRegister();
4416 Result start = allocator_->Allocate();
4417 ASSERT(start.is_valid());
4418 __ mov(SlotOperand(slot, start.reg()), value.reg());
4419 // RecordWrite may destroy the value registers.
4420 //
4421 // TODO(204): Avoid actually spilling when the value is not
4422 // needed (probably the common case).
4423 frame_->Spill(value.reg());
4424 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4425 Result temp = allocator_->Allocate();
4426 ASSERT(temp.is_valid());
4427 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4428 // The results start, value, and temp are released by going out of
4429 // scope.
4430 }
4431
4432 exit.Bind();
4433 }
4434 }
4435
4436
4437 void CodeGenerator::VisitSlot(Slot* node) {
4438 Comment cmnt(masm_, "[ Slot");
4439 Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
4440 frame()->Push(&result);
4441 }
4442
4443
4444 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
4445 Comment cmnt(masm_, "[ VariableProxy");
4446 Variable* var = node->var();
4447 Expression* expr = var->rewrite();
4448 if (expr != NULL) {
4449 Visit(expr);
4450 } else {
4451 ASSERT(var->is_global());
4452 Reference ref(this, node);
4453 ref.GetValue();
4454 }
4455 }
4456
4457
4458 void CodeGenerator::VisitLiteral(Literal* node) {
4459 Comment cmnt(masm_, "[ Literal");
4460 frame_->Push(node->handle());
4461 }
4462
4463
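// Unsafe smi constants are emitted in two halves (low word first, high
// word OR'ed in afterwards) so that the full value never appears as a
// single 32-bit immediate in the generated code.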
4464 void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
4465 ASSERT(value->IsSmi());
4466 int bits = reinterpret_cast<int>(*value);
4467 __ push(Immediate(bits & 0x0000FFFF));
4468 __ or_(Operand(esp, 0), Immediate(bits & 0xFFFF0000));
4469 }
4470
4471
4472 void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
4473 ASSERT(value->IsSmi());
4474 int bits = reinterpret_cast<int>(*value);
4475 __ mov(Operand(ebp, offset), Immediate(bits & 0x0000FFFF));
4476 __ or_(Operand(ebp, offset), Immediate(bits & 0xFFFF0000));
4477 }
4478
4479
4480 void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
4481 ASSERT(target.is_valid());
4482 ASSERT(value->IsSmi());
4483 int bits = reinterpret_cast<int>(*value);
4484 __ Set(target, Immediate(bits & 0x0000FFFF));
4485 __ or_(target, bits & 0xFFFF0000);
4486 }
4487
4488
4489 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4490 if (!value->IsSmi()) return false;
4491 int int_value = Smi::cast(*value)->value();
4492 return !is_intn(int_value, kMaxSmiInlinedBits);
4493 }
4494
4495
4496 // Materialize the regexp literal 'node' in the literals array
4497 // 'literals' of the function. Leave the regexp boilerplate in
4498 // 'boilerplate'.
4499 class DeferredRegExpLiteral: public DeferredCode {
4500 public:
4501 DeferredRegExpLiteral(Register boilerplate,
4502 Register literals,
4503 RegExpLiteral* node)
4504 : boilerplate_(boilerplate), literals_(literals), node_(node) {
4505 set_comment("[ DeferredRegExpLiteral");
4506 }
4507
4508 void Generate();
4509
4510 private:
4511 Register boilerplate_;
4512 Register literals_;
4513 RegExpLiteral* node_;
4514 };
4515
4516
4517 void DeferredRegExpLiteral::Generate() {
4518 // Since the entry is undefined we call the runtime system to
4519 // compute the literal.
4520 // Literal array (0).
4521 __ push(literals_);
4522 // Literal index (1).
4523 __ push(Immediate(Smi::FromInt(node_->literal_index())));
4524 // RegExp pattern (2).
4525 __ push(Immediate(node_->pattern()));
4526 // RegExp flags (3).
4527 __ push(Immediate(node_->flags()));
4528 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
4529 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
4530 }
4531
4532
4533 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
4534 Comment cmnt(masm_, "[ RegExp Literal");
4535
4536 // Retrieve the literals array and check the allocated entry. Begin
4537 // with a writable copy of the function of this activation in a
4538 // register.
4539 frame_->PushFunction();
4540 Result literals = frame_->Pop();
4541 literals.ToRegister();
4542 frame_->Spill(literals.reg());
4543
4544 // Load the literals array of the function.
4545 __ mov(literals.reg(),
4546 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4547
4548 // Load the literal at the index saved in the AST node.
4549 Result boilerplate = allocator_->Allocate();
4550 ASSERT(boilerplate.is_valid());
4551 int literal_offset =
4552 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
4553 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
4554
4555 // Check whether we need to materialize the RegExp object. If so,
4556 // jump to the deferred code passing the literals array.
4557 DeferredRegExpLiteral* deferred =
4558 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
4559 __ cmp(boilerplate.reg(), Factory::undefined_value());
4560 deferred->Branch(equal);
4561 deferred->BindExit();
4562 literals.Unuse();
4563
4564 // Push the boilerplate object.
4565 frame_->Push(&boilerplate);
4566 }
4567
4568
4569 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
4570 Comment cmnt(masm_, "[ ObjectLiteral");
4571
4572 // Load a writable copy of the function of this activation in a
4573 // register.
4574 frame_->PushFunction();
4575 Result literals = frame_->Pop();
4576 literals.ToRegister();
4577 frame_->Spill(literals.reg());
4578
4579 // Load the literals array of the function.
4580 __ mov(literals.reg(),
4581 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4582 // Literal array.
4583 frame_->Push(&literals);
4584 // Literal index.
4585 frame_->Push(Smi::FromInt(node->literal_index()));
4586 // Constant properties.
4587 frame_->Push(node->constant_properties());
4588 Result clone;
4589 if (node->depth() > 1) {
4590 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
4591 } else {
4592 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
4593 }
4594 frame_->Push(&clone);
4595
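// The boilerplate clone is now on top of the frame. Each non-constant
// property below is stored into it, leaving the clone as the value of
// the whole literal expression.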
4596 for (int i = 0; i < node->properties()->length(); i++) {
4597 ObjectLiteral::Property* property = node->properties()->at(i);
4598 switch (property->kind()) {
4599 case ObjectLiteral::Property::CONSTANT:
4600 break;
4601 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
4602 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
4603 // else fall through.
4604 case ObjectLiteral::Property::COMPUTED: {
4605 Handle<Object> key(property->key()->handle());
4606 if (key->IsSymbol()) {
4607 // Duplicate the object as the IC receiver.
4608 frame_->Dup();
4609 Load(property->value());
4610 Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false);
4611 dummy.Unuse();
4612 break;
4613 }
4614 // Fall through
4615 }
4616 case ObjectLiteral::Property::PROTOTYPE: {
4617 // Duplicate the object as an argument to the runtime call.
4618 frame_->Dup();
4619 Load(property->key());
4620 Load(property->value());
4621 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
4622 // Ignore the result.
4623 break;
4624 }
4625 case ObjectLiteral::Property::SETTER: {
4626 // Duplicate the object as an argument to the runtime call.
4627 frame_->Dup();
4628 Load(property->key());
4629 frame_->Push(Smi::FromInt(1));
4630 Load(property->value());
4631 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4632 // Ignore the result.
4633 break;
4634 }
4635 case ObjectLiteral::Property::GETTER: {
4636 // Duplicate the object as an argument to the runtime call.
4637 frame_->Dup();
4638 Load(property->key());
4639 frame_->Push(Smi::FromInt(0));
4640 Load(property->value());
4641 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4642 // Ignore the result.
4643 break;
4644 }
4645 default: UNREACHABLE();
4646 }
4647 }
4648 }
4649
4650
4651 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
4652 Comment cmnt(masm_, "[ ArrayLiteral");
4653
4654 // Load a writable copy of the function of this activation in a
4655 // register.
4656 frame_->PushFunction();
4657 Result literals = frame_->Pop();
4658 literals.ToRegister();
4659 frame_->Spill(literals.reg());
4660
4661 // Load the literals array of the function.
4662 __ mov(literals.reg(),
4663 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4664
4665 frame_->Push(&literals);
4666 frame_->Push(Smi::FromInt(node->literal_index()));
4667 frame_->Push(node->constant_elements());
4668 int length = node->values()->length();
4669 Result clone;
4670 if (node->depth() > 1) {
4671 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
4672 } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
4673 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
4674 } else {
4675 FastCloneShallowArrayStub stub(length);
4676 clone = frame_->CallStub(&stub, 3);
4677 }
4678 frame_->Push(&clone);
4679
4680 // Generate code to set the elements in the array that are not
4681 // literals.
4682 for (int i = 0; i < length; i++) {
4683 Expression* value = node->values()->at(i);
4684
4685 // If value is a literal the property value is already set in the
4686 // boilerplate object.
4687 if (value->AsLiteral() != NULL) continue;
4688 // If value is a materialized literal the property value is already set
4689 // in the boilerplate object if it is simple.
4690 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
4691
4692 // The property must be set by generated code.
4693 Load(value);
4694
4695 // Get the property value off the stack.
4696 Result prop_value = frame_->Pop();
4697 prop_value.ToRegister();
4698
4699 // Fetch the array literal while leaving a copy on the stack and
4700 // use it to get the elements array.
4701 frame_->Dup();
4702 Result elements = frame_->Pop();
4703 elements.ToRegister();
4704 frame_->Spill(elements.reg());
4705 // Get the elements array.
4706 __ mov(elements.reg(),
4707 FieldOperand(elements.reg(), JSObject::kElementsOffset));
4708
4709 // Write to the indexed properties array.
4710 int offset = i * kPointerSize + FixedArray::kHeaderSize;
4711 __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
4712
4713 // Update the write barrier for the array address.
4714 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
4715 Result scratch = allocator_->Allocate();
4716 ASSERT(scratch.is_valid());
4717 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
4718 }
4719 }
4720
4721
4722 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
4723 ASSERT(!in_spilled_code());
4724 // Call runtime routine to allocate the catch extension object and
4725 // assign the exception value to the catch variable.
4726 Comment cmnt(masm_, "[ CatchExtensionObject");
4727 Load(node->key());
4728 Load(node->value());
4729 Result result =
4730 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
4731 frame_->Push(&result);
4732 }
4733
4734
4735 void CodeGenerator::EmitSlotAssignment(Assignment* node) {
4736 #ifdef DEBUG
4737 int original_height = frame()->height();
4738 #endif
4739 Comment cmnt(masm(), "[ Variable Assignment");
4740 Variable* var = node->target()->AsVariableProxy()->AsVariable();
4741 ASSERT(var != NULL);
4742 Slot* slot = var->slot();
4743 ASSERT(slot != NULL);
4744
4745 // Evaluate the right-hand side.
4746 if (node->is_compound()) {
4747 Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
4748 frame()->Push(&result);
4749 Load(node->value());
4750
4751 bool overwrite_value =
4752 (node->value()->AsBinaryOperation() != NULL &&
4753 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
4754 GenericBinaryOperation(node->binary_op(),
4755 node->type(),
4756 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
4757 } else {
4758 Load(node->value());
4759 }
4760
4761 // Perform the assignment.
4762 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
4763 CodeForSourcePosition(node->position());
4764 StoreToSlot(slot,
4765 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
4766 }
4767 ASSERT(frame()->height() == original_height + 1);
4768 }
4769
4770
4771 void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
4772 #ifdef DEBUG
4773 int original_height = frame()->height();
4774 #endif
4775 Comment cmnt(masm(), "[ Named Property Assignment");
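// Handles assignments whose target is a named property, e.g. 'o.x = v', or a
// global variable, e.g. 'x = v' for a global 'x' (globals are treated as
// named properties of the global object).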
4776 Variable* var = node->target()->AsVariableProxy()->AsVariable();
4777 Property* prop = node->target()->AsProperty();
4778 ASSERT(var == NULL || (prop == NULL && var->is_global()));
4779
4780 // Initialize name and evaluate the receiver subexpression if necessary.
4781 Handle<String> name;
4782 bool is_trivial_receiver = false;
4783 if (var != NULL) {
4784 name = var->name();
4785 } else {
4786 Literal* lit = prop->key()->AsLiteral();
4787 ASSERT_NOT_NULL(lit);
4788 name = Handle<String>::cast(lit->handle());
4789 // Do not materialize the receiver on the frame if it is trivial.
4790 is_trivial_receiver = prop->obj()->IsTrivial();
4791 if (!is_trivial_receiver) Load(prop->obj());
4792 }
4793
4794 if (node->starts_initialization_block()) {
4795 ASSERT_EQ(NULL, var);
4796 // Change to slow case in the beginning of an initialization block to
4797 // avoid the quadratic behavior of repeatedly adding fast properties.
4798 if (is_trivial_receiver) {
4799 frame()->Push(prop->obj());
4800 } else {
4801 frame()->Dup();
4802 }
4803 Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
4804 }
4805
4806 if (node->ends_initialization_block() && !is_trivial_receiver) {
4807 // Add an extra copy of the receiver to the frame, so that it can be
4808 // converted back to fast case after the assignment.
4809 frame()->Dup();
4810 }
4811
4812 // Evaluate the right-hand side.
4813 if (node->is_compound()) {
4814 if (is_trivial_receiver) {
4815 frame()->Push(prop->obj());
4816 } else if (var != NULL) {
4817 // The LoadIC stub expects the object in eax.
4818 // Freeing eax causes the code generator to load the global into it.
4819 frame_->Spill(eax);
4820 LoadGlobal();
4821 } else {
4822 frame()->Dup();
4823 }
4824 Result value = EmitNamedLoad(name, var != NULL);
4825 frame()->Push(&value);
4826 Load(node->value());
4827
4828 bool overwrite_value =
4829 (node->value()->AsBinaryOperation() != NULL &&
4830 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
4831 GenericBinaryOperation(node->binary_op(),
4832 node->type(),
4833 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
4834 } else {
4835 Load(node->value());
4836 }
4837
4838 // Perform the assignment. It is safe to ignore constants here.
4839 ASSERT(var == NULL || var->mode() != Variable::CONST);
4840 ASSERT_NE(Token::INIT_CONST, node->op());
4841 if (is_trivial_receiver) {
4842 Result value = frame()->Pop();
4843 frame()->Push(prop->obj());
4844 frame()->Push(&value);
4845 }
4846 CodeForSourcePosition(node->position());
4847 bool is_contextual = (var != NULL);
4848 Result answer = EmitNamedStore(name, is_contextual);
4849 frame()->Push(&answer);
4850
4851 if (node->ends_initialization_block()) {
4852 ASSERT_EQ(NULL, var);
4853 // The argument to the runtime call is the receiver.
4854 if (is_trivial_receiver) {
4855 frame()->Push(prop->obj());
4856 } else {
4857 // A copy of the receiver is below the value of the assignment. Swap
4858 // the receiver and the value of the assignment expression.
4859 Result result = frame()->Pop();
4860 Result receiver = frame()->Pop();
4861 frame()->Push(&result);
4862 frame()->Push(&receiver);
4863 }
4864 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
4865 }
4866
4867 ASSERT_EQ(frame()->height(), original_height + 1);
4868 }
4869
4870
4871 void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
4872 #ifdef DEBUG
4873 int original_height = frame()->height();
4874 #endif
4875 Comment cmnt(masm_, "[ Keyed Property Assignment");
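// Handles assignments whose target is a keyed property, e.g. 'o[i] = v'.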
4876 Property* prop = node->target()->AsProperty();
4877 ASSERT_NOT_NULL(prop);
4878
4879 // Evaluate the receiver subexpression.
4880 Load(prop->obj());
4881
4882 if (node->starts_initialization_block()) {
4883 // Change to slow case in the beginning of an initialization block to
4884 // avoid the quadratic behavior of repeatedly adding fast properties.
4885 frame_->Dup();
4886 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
4887 }
4888
4889 if (node->ends_initialization_block()) {
4890 // Add an extra copy of the receiver to the frame, so that it can be
4891 // converted back to fast case after the assignment.
4892 frame_->Dup();
4893 }
4894
4895 // Evaluate the key subexpression.
4896 Load(prop->key());
4897
4898 // Evaluate the right-hand side.
4899 if (node->is_compound()) {
4900 // Duplicate receiver and key.
4901 frame()->PushElementAt(1);
4902 frame()->PushElementAt(1);
4903 Result value = EmitKeyedLoad();
4904 frame()->Push(&value);
4905 Load(node->value());
4906
4907 bool overwrite_value =
4908 (node->value()->AsBinaryOperation() != NULL &&
4909 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
4910 GenericBinaryOperation(node->binary_op(),
4911 node->type(),
4912 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
4913 } else {
4914 Load(node->value());
4915 }
4916
4917 // Perform the assignment. It is safe to ignore constants here.
4918 ASSERT(node->op() != Token::INIT_CONST);
4919 CodeForSourcePosition(node->position());
4920 Result answer = EmitKeyedStore(prop->key()->type());
4921 frame()->Push(&answer);
4922
4923 if (node->ends_initialization_block()) {
4924 // The argument to the runtime call is the extra copy of the receiver,
4925 // which is below the value of the assignment. Swap the receiver and
4926 // the value of the assignment expression.
4927 Result result = frame()->Pop();
4928 Result receiver = frame()->Pop();
4929 frame()->Push(&result);
4930 frame()->Push(&receiver);
4931 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
4932 }
4933
4934 ASSERT(frame()->height() == original_height + 1);
4935 }
4936
4937
4938 void CodeGenerator::VisitAssignment(Assignment* node) {
4939 #ifdef DEBUG
4940 int original_height = frame()->height();
4941 #endif
4942 Variable* var = node->target()->AsVariableProxy()->AsVariable();
4943 Property* prop = node->target()->AsProperty();
4944
4945 if (var != NULL && !var->is_global()) {
4946 EmitSlotAssignment(node);
4947
4948 } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
4949 (var != NULL && var->is_global())) {
4950 // Properties whose keys are property names and global variables are
4951 // treated as named property references. We do not need to consider
4952 // global 'this' because it is not a valid left-hand side.
4953 EmitNamedPropertyAssignment(node);
4954
4955 } else if (prop != NULL) {
4956 // Other properties (including rewritten parameters for a function that
4957 // uses arguments) are keyed property assignments.
4958 EmitKeyedPropertyAssignment(node);
4959
4960 } else {
4961 // Invalid left-hand side.
4962 Load(node->target());
4963 Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
4964 // The runtime call doesn't actually return but the code generator will
4965 // still generate code and expects a certain frame height.
4966 frame()->Push(&result);
4967 }
4968
4969 ASSERT(frame()->height() == original_height + 1);
4970 }
4971
4972
4973 void CodeGenerator::VisitThrow(Throw* node) {
4974 Comment cmnt(masm_, "[ Throw");
4975 Load(node->exception());
4976 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
4977 frame_->Push(&result);
4978 }
4979
4980
4981 void CodeGenerator::VisitProperty(Property* node) {
4982 Comment cmnt(masm_, "[ Property");
4983 Reference property(this, node);
4984 property.GetValue();
4985 }
4986
4987
4988 void CodeGenerator::VisitCall(Call* node) {
4989 Comment cmnt(masm_, "[ Call");
4990
4991 Expression* function = node->expression();
4992 ZoneList<Expression*>* args = node->arguments();
4993
4994 // Check if the function is a variable or a property.
4995 Variable* var = function->AsVariableProxy()->AsVariable();
4996 Property* property = function->AsProperty();
4997
4998 // ------------------------------------------------------------------------
4999 // Fast-case: Use inline caching.
5000 // ---
5001 // According to ECMA-262, section 11.2.3, page 44, the function to call
5002 // must be resolved after the arguments have been evaluated. The IC code
5003 // automatically handles this by loading the arguments before the function
5004 // is resolved in cache misses (this also holds for megamorphic calls).
5005 // ------------------------------------------------------------------------
5006
5007 if (var != NULL && var->is_possibly_eval()) {
5008 // ----------------------------------
5009 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
5010 // ----------------------------------
5011
5012 // In a call to eval, we first call %ResolvePossiblyDirectEval to
5013 // resolve the function we need to call and the receiver of the
5014 // call. Then we call the resolved function using the given
5015 // arguments.
5016
5017 // Prepare the stack for the call to the resolved function.
5018 Load(function);
5019
5020 // Allocate a frame slot for the receiver.
5021 frame_->Push(Factory::undefined_value());
5022 int arg_count = args->length();
5023 for (int i = 0; i < arg_count; i++) {
5024 Load(args->at(i));
5025 }
5026
5027 // Prepare the stack for the call to ResolvePossiblyDirectEval.
5028 frame_->PushElementAt(arg_count + 1);
5029 if (arg_count > 0) {
5030 frame_->PushElementAt(arg_count);
5031 } else {
5032 frame_->Push(Factory::undefined_value());
5033 }
5034
5035 // Push the receiver.
5036 frame_->PushParameterAt(-1);
5037
5038 // Resolve the call.
5039 Result result =
5040 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
5041
5042 // The runtime call returns a pair of values in eax (function) and
5043 // edx (receiver). Touch up the stack with the right values.
5044 Result receiver = allocator_->Allocate(edx);
5045 frame_->SetElementAt(arg_count + 1, &result);
5046 frame_->SetElementAt(arg_count, &receiver);
5047 receiver.Unuse();
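// The frame now holds, from the bottom up, the resolved function, the
// resolved receiver and the arguments, which is the layout the call stub
// expects.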
5048
5049 // Call the function.
5050 CodeForSourcePosition(node->position());
5051 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
5052 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
5053 result = frame_->CallStub(&call_function, arg_count + 1);
5054
5055 // Restore the context and overwrite the function on the stack with
5056 // the result.
5057 frame_->RestoreContextRegister();
5058 frame_->SetElementAt(0, &result);
5059
5060 } else if (var != NULL && !var->is_this() && var->is_global()) {
5061 // ----------------------------------
5062 // JavaScript example: 'foo(1, 2, 3)' // foo is global
5063 // ----------------------------------
5064
5065 // Pass the global object as the receiver and let the IC stub
5066 // patch the stack to use the global proxy as 'this' in the
5067 // invoked function.
5068 LoadGlobal();
5069
5070 // Load the arguments.
5071 int arg_count = args->length();
5072 for (int i = 0; i < arg_count; i++) {
5073 Load(args->at(i));
5074 }
5075
5076 // Push the name of the function onto the frame.
5077 frame_->Push(var->name());
5078
5079 // Call the IC initialization code.
5080 CodeForSourcePosition(node->position());
5081 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
5082 arg_count,
5083 loop_nesting());
5084 frame_->RestoreContextRegister();
5085 frame_->Push(&result);
5086
5087 } else if (var != NULL && var->slot() != NULL &&
5088 var->slot()->type() == Slot::LOOKUP) {
5089 // ----------------------------------
5090 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
5091 // ----------------------------------
5092
5093 // Load the function from the context. Sync the frame so we can
5094 // push the arguments directly into place.
5095 frame_->SyncRange(0, frame_->element_count() - 1);
5096 frame_->EmitPush(esi);
5097 frame_->EmitPush(Immediate(var->name()));
5098 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
5099 // The runtime call returns a pair of values in eax and edx. The
5100 // looked-up function is in eax and the receiver is in edx. These
5101 // register references are not ref counted here. We spill them
5102 // eagerly since they are arguments to an inevitable call (and are
5103 // not sharable by the arguments).
5104 ASSERT(!allocator()->is_used(eax));
5105 frame_->EmitPush(eax);
5106
5107 // Load the receiver.
5108 ASSERT(!allocator()->is_used(edx));
5109 frame_->EmitPush(edx);
5110
5111 // Call the function.
5112 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
5113
5114 } else if (property != NULL) {
5115 // Check if the key is a literal string.
5116 Literal* literal = property->key()->AsLiteral();
5117
5118 if (literal != NULL && literal->handle()->IsSymbol()) {
5119 // ------------------------------------------------------------------
5120 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
5121 // ------------------------------------------------------------------
5122
5123 Handle<String> name = Handle<String>::cast(literal->handle());
5124
5125 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
5126 name->IsEqualTo(CStrVector("apply")) &&
5127 args->length() == 2 &&
5128 args->at(1)->AsVariableProxy() != NULL &&
5129 args->at(1)->AsVariableProxy()->IsArguments()) {
5130 // Use the optimized Function.prototype.apply that avoids
5131 // allocating lazily allocated arguments objects.
5132 CallApplyLazy(property->obj(),
5133 args->at(0),
5134 args->at(1)->AsVariableProxy(),
5135 node->position());
5136
5137 } else {
5138 // Push the receiver onto the frame.
5139 Load(property->obj());
5140
5141 // Load the arguments.
5142 int arg_count = args->length();
5143 for (int i = 0; i < arg_count; i++) {
5144 Load(args->at(i));
5145 }
5146
5147 // Push the name of the function onto the frame.
5148 frame_->Push(name);
5149
5150 // Call the IC initialization code.
5151 CodeForSourcePosition(node->position());
5152 Result result =
5153 frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
5154 loop_nesting());
5155 frame_->RestoreContextRegister();
5156 frame_->Push(&result);
5157 }
5158
5159 } else {
5160 // -------------------------------------------
5161 // JavaScript example: 'array[index](1, 2, 3)'
5162 // -------------------------------------------
5163
5164 // Load the function to call from the property through a reference.
5165
5166 // Pass receiver to called function.
5167 if (property->is_synthetic()) {
5168 Reference ref(this, property);
5169 ref.GetValue();
5170 // Use global object as receiver.
5171 LoadGlobalReceiver();
5172 } else {
5173 Load(property->obj());
5174 frame()->Dup();
5175 Load(property->key());
5176 Result function = EmitKeyedLoad();
5177 Result receiver = frame_->Pop();
5178 frame_->Push(&function);
5179 frame_->Push(&receiver);
5180 }
5181
5182 // Call the function.
5183 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
5184 }
5185
5186 } else {
5187 // ----------------------------------
5188 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
5189 // ----------------------------------
5190
5191 // Load the function.
5192 Load(function);
5193
5194 // Pass the global proxy as the receiver.
5195 LoadGlobalReceiver();
5196
5197 // Call the function.
5198 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
5199 }
5200 }
5201
5202
5203 void CodeGenerator::VisitCallNew(CallNew* node) {
5204 Comment cmnt(masm_, "[ CallNew");
5205
5206 // According to ECMA-262, section 11.2.2, page 44, the function
5207 // expression in new calls must be evaluated before the
5208 // arguments. This is different from ordinary calls, where the
5209 // actual function to call is resolved after the arguments have been
5210 // evaluated.
5211
5212 // Compute function to call and use the global object as the
5213 // receiver. There is no need to use the global proxy here because
5214 // it will always be replaced with a newly allocated object.
5215 Load(node->expression());
5216 LoadGlobal();
5217
5218 // Push the arguments ("left-to-right") on the stack.
5219 ZoneList<Expression*>* args = node->arguments();
5220 int arg_count = args->length();
5221 for (int i = 0; i < arg_count; i++) {
5222 Load(args->at(i));
5223 }
5224
5225 // Call the construct call builtin that handles allocation and
5226 // constructor invocation.
5227 CodeForSourcePosition(node->position());
5228 Result result = frame_->CallConstructor(arg_count);
5229 // Replace the function on the stack with the result.
5230 frame_->SetElementAt(0, &result);
5231 }
5232
5233
5234 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
5235 ASSERT(args->length() == 1);
5236 Load(args->at(0));
5237 Result value = frame_->Pop();
5238 value.ToRegister();
5239 ASSERT(value.is_valid());
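// A smi has tag 0 in its low bit, so the zero flag is set exactly when the
// value is a smi.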
5240 __ test(value.reg(), Immediate(kSmiTagMask));
5241 value.Unuse();
5242 destination()->Split(zero);
5243 }
5244
5245
5246 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
5247 // Conditionally generate a log call.
5248 // Args:
5249 // 0 (literal string): The type of logging (corresponds to the flags).
5250 // This is used to determine whether or not to generate the log call.
5251 // 1 (string): Format string. Access the string at argument index 2
5252 // with '%2s' (see Logger::LogRuntime for all the formats).
5253 // 2 (array): Arguments to the format string.
5254 ASSERT_EQ(args->length(), 3);
5255 #ifdef ENABLE_LOGGING_AND_PROFILING
5256 if (ShouldGenerateLog(args->at(0))) {
5257 Load(args->at(1));
5258 Load(args->at(2));
5259 frame_->CallRuntime(Runtime::kLog, 2);
5260 }
5261 #endif
5262 // Finally, we're expected to leave a value on the top of the stack.
5263 frame_->Push(Factory::undefined_value());
5264 }
5265
5266
5267 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
5268 ASSERT(args->length() == 1);
5269 Load(args->at(0));
5270 Result value = frame_->Pop();
5271 value.ToRegister();
5272 ASSERT(value.is_valid());
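// Test the smi tag bit and the sign bit in one instruction: the zero flag is
// only set if the value is a smi and its sign bit is clear, i.e. if it is a
// non-negative smi.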
5273 __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
5274 value.Unuse();
5275 destination()->Split(zero);
5276 }
5277
5278
5279 // This generates code that performs a charCodeAt() call or returns
5280 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
5281 // It can handle flat 8 and 16 bit strings and cons strings where the
5282 // answer is found in the left hand branch of the cons. The slow case will
5283 // flatten the string, which will ensure that the answer is in the left hand
5284 // branch the next time around.
5285 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
5286 Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
5287 ASSERT(args->length() == 2);
5288
5289 Label slow_case;
5290 Label end;
5291 Label not_a_flat_string;
5292 Label try_again_with_new_string;
5293 Label ascii_string;
5294 Label got_char_code;
5295
5296 Load(args->at(0));
5297 Load(args->at(1));
5298 Result index = frame_->Pop();
5299 Result object = frame_->Pop();
5300
5301 // Get exclusive hold of register ecx; it is used below as a scratch register.
5302 Result shift_amount;
5303 if (object.is_register() && object.reg().is(ecx)) {
5304 Result fresh = allocator_->Allocate();
5305 shift_amount = object;
5306 object = fresh;
5307 __ mov(object.reg(), ecx);
5308 }
5309 if (index.is_register() && index.reg().is(ecx)) {
5310 Result fresh = allocator_->Allocate();
5311 shift_amount = index;
5312 index = fresh;
5313 __ mov(index.reg(), ecx);
5314 }
5315 // There could be references to ecx in the frame. Allocating will
5316 // spill them, otherwise spill explicitly.
5317 if (shift_amount.is_valid()) {
5318 frame_->Spill(ecx);
5319 } else {
5320 shift_amount = allocator()->Allocate(ecx);
5321 }
5322 ASSERT(shift_amount.is_register());
5323 ASSERT(shift_amount.reg().is(ecx));
5324 ASSERT(allocator_->count(ecx) == 1);
5325
5326 // We will mutate the index register and possibly the object register.
5327 // The case where they are somehow the same register is handled
5328 // because we only mutate them in the case where the receiver is a
5329 // heap object and the index is not.
5330 object.ToRegister();
5331 index.ToRegister();
5332 frame_->Spill(object.reg());
5333 frame_->Spill(index.reg());
5334
5335 // We need a single extra temporary register.
5336 Result temp = allocator()->Allocate();
5337 ASSERT(temp.is_valid());
5338
5339 // There is no virtual frame effect from here up to the final result
5340 // push.
5341
5342 // If the receiver is a smi trigger the slow case.
5343 ASSERT(kSmiTag == 0);
5344 __ test(object.reg(), Immediate(kSmiTagMask));
5345 __ j(zero, &slow_case);
5346
5347 // If the index is negative or non-smi trigger the slow case.
5348 ASSERT(kSmiTag == 0);
5349 __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
5350 __ j(not_zero, &slow_case);
5351 // Untag the index.
5352 __ SmiUntag(index.reg());
5353
5354 __ bind(&try_again_with_new_string);
5355 // Fetch the instance type of the receiver into ecx.
5356 __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
5357 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5358 // If the receiver is not a string trigger the slow case.
5359 __ test(ecx, Immediate(kIsNotStringMask));
5360 __ j(not_zero, &slow_case);
5361
5362 // Fetch the length field into the temporary register.
5363 __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
5364 // Check for index out of range.
5365 __ cmp(index.reg(), Operand(temp.reg()));
5366 __ j(greater_equal, &slow_case);
5367 // Reload the instance type (into the temp register this time).
5368 __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
5369 __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
5370
5371 // We need special handling for non-flat strings.
5372 ASSERT(kSeqStringTag == 0);
5373 __ test(temp.reg(), Immediate(kStringRepresentationMask));
5374 __ j(not_zero, &not_a_flat_string);
5375 // Check for 1-byte or 2-byte string.
5376 __ test(temp.reg(), Immediate(kStringEncodingMask));
5377 __ j(not_zero, &ascii_string);
5378
5379 // 2-byte string.
5380 // Load the 2-byte character code into the temp register.
5381 __ movzx_w(temp.reg(), FieldOperand(object.reg(),
5382 index.reg(),
5383 times_2,
5384 SeqTwoByteString::kHeaderSize));
5385 __ jmp(&got_char_code);
5386
5387 // ASCII string.
5388 __ bind(&ascii_string);
5389 // Load the byte into the temp register.
5390 __ movzx_b(temp.reg(), FieldOperand(object.reg(),
5391 index.reg(),
5392 times_1,
5393 SeqAsciiString::kHeaderSize));
5394 __ bind(&got_char_code);
5395 __ SmiTag(temp.reg());
5396 __ jmp(&end);
5397
5398 // Handle non-flat strings.
5399 __ bind(&not_a_flat_string);
5400 __ and_(temp.reg(), kStringRepresentationMask);
5401 __ cmp(temp.reg(), kConsStringTag);
5402 __ j(not_equal, &slow_case);
5403
5404 // ConsString.
5405 // Check that the right hand side is the empty string (i.e. if this is really a
5406 // flat string in a cons string). If that is not the case we would rather go
5407 // to the runtime system now, to flatten the string.
5408 __ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
5409 __ cmp(Operand(temp.reg()), Factory::empty_string());
5410 __ j(not_equal, &slow_case);
5411 // Get the first of the two strings.
5412 __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
5413 __ jmp(&try_again_with_new_string);
5414
5415 __ bind(&slow_case);
5416 // Move the undefined value into the result register, which will
5417 // trigger the slow case.
5418 __ Set(temp.reg(), Immediate(Factory::undefined_value()));
5419
5420 __ bind(&end);
5421 frame_->Push(&temp);
5422 }
5423
5424
5425 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
5426 ASSERT(args->length() == 1);
5427 Load(args->at(0));
5428 Result value = frame_->Pop();
5429 value.ToRegister();
5430 ASSERT(value.is_valid());
5431 __ test(value.reg(), Immediate(kSmiTagMask));
5432 destination()->false_target()->Branch(equal);
5433 // It is a heap object - get map.
5434 Result temp = allocator()->Allocate();
5435 ASSERT(temp.is_valid());
5436 // Check if the object is a JS array or not.
5437 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
5438 value.Unuse();
5439 temp.Unuse();
5440 destination()->Split(equal);
5441 }
5442
5443
5444 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
5445 ASSERT(args->length() == 1);
5446 Load(args->at(0));
5447 Result value = frame_->Pop();
5448 value.ToRegister();
5449 ASSERT(value.is_valid());
5450 __ test(value.reg(), Immediate(kSmiTagMask));
5451 destination()->false_target()->Branch(equal);
5452 // It is a heap object - get map.
5453 Result temp = allocator()->Allocate();
5454 ASSERT(temp.is_valid());
5455 // Check if the object is a regexp.
5456 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
5457 value.Unuse();
5458 temp.Unuse();
5459 destination()->Split(equal);
5460 }
5461
5462
5463 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
5464 // This generates a fast version of:
5465 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
5466 ASSERT(args->length() == 1);
5467 Load(args->at(0));
5468 Result obj = frame_->Pop();
5469 obj.ToRegister();
5470
5471 __ test(obj.reg(), Immediate(kSmiTagMask));
5472 destination()->false_target()->Branch(zero);
5473 __ cmp(obj.reg(), Factory::null_value());
5474 destination()->true_target()->Branch(equal);
5475
5476 Result map = allocator()->Allocate();
5477 ASSERT(map.is_valid());
5478 __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
5479 // Undetectable objects behave like undefined when tested with typeof.
5480 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
5481 __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
5482 destination()->false_target()->Branch(not_zero);
5483 __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
5484 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
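// Finally, check that the instance type is within the range of JS object
// types, [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE].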
5485 __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
5486 destination()->false_target()->Branch(less);
5487 __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
5488 obj.Unuse();
5489 map.Unuse();
5490 destination()->Split(less_equal);
5491 }
5492
5493
5494 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
5495 // This generates a fast version of:
5496 // (%_ClassOf(arg) === 'Function')
5497 ASSERT(args->length() == 1);
5498 Load(args->at(0));
5499 Result obj = frame_->Pop();
5500 obj.ToRegister();
5501 __ test(obj.reg(), Immediate(kSmiTagMask));
5502 destination()->false_target()->Branch(zero);
5503 Result temp = allocator()->Allocate();
5504 ASSERT(temp.is_valid());
5505 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
5506 obj.Unuse();
5507 temp.Unuse();
5508 destination()->Split(equal);
5509 }
5510
5511
5512 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
5513 ASSERT(args->length() == 1);
5514 Load(args->at(0));
5515 Result obj = frame_->Pop();
5516 obj.ToRegister();
5517 __ test(obj.reg(), Immediate(kSmiTagMask));
5518 destination()->false_target()->Branch(zero);
5519 Result temp = allocator()->Allocate();
5520 ASSERT(temp.is_valid());
5521 __ mov(temp.reg(),
5522 FieldOperand(obj.reg(), HeapObject::kMapOffset));
5523 __ movzx_b(temp.reg(),
5524 FieldOperand(temp.reg(), Map::kBitFieldOffset));
5525 __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
5526 obj.Unuse();
5527 temp.Unuse();
5528 destination()->Split(not_zero);
5529 }
5530
5531
5532 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
5533 ASSERT(args->length() == 0);
5534
5535 // Get the frame pointer for the calling frame.
5536 Result fp = allocator()->Allocate();
5537 __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
5538
5539 // Skip the arguments adaptor frame if it exists.
5540 Label check_frame_marker;
5541 __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
5542 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5543 __ j(not_equal, &check_frame_marker);
5544 __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
5545
5546 // Check the marker in the calling frame.
5547 __ bind(&check_frame_marker);
5548 __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
5549 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5550 fp.Unuse();
5551 destination()->Split(equal);
5552 }
5553
5554
5555 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
5556 ASSERT(args->length() == 0);
5557 // ArgumentsAccessStub takes the parameter count as an input argument
5558 // in register eax. Create a constant result for it.
5559 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
5560 // Call the shared stub to get to the arguments.length.
5561 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
5562 Result result = frame_->CallStub(&stub, &count);
5563 frame_->Push(&result);
5564 }
5565
5566
5567 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
5568 ASSERT(args->length() == 1);
5569 JumpTarget leave, null, function, non_function_constructor;
5570 Load(args->at(0)); // Load the object.
5571 Result obj = frame_->Pop();
5572 obj.ToRegister();
5573 frame_->Spill(obj.reg());
5574
5575 // If the object is a smi, we return null.
5576 __ test(obj.reg(), Immediate(kSmiTagMask));
5577 null.Branch(zero);
5578
5579 // Check that the object is a JS object but take special care of JS
5580 // functions to make sure they have 'Function' as their class.
5581 { Result tmp = allocator()->Allocate();
5582 __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
5583 __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
5584 __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
5585 null.Branch(less);
5586
5587 // As long as JS_FUNCTION_TYPE is the last instance type and it is
5588 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
5589 // LAST_JS_OBJECT_TYPE.
5590 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
5591 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
5592 __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
5593 function.Branch(equal);
5594 }
5595
5596 // Check if the constructor in the map is a function.
5597 { Result tmp = allocator()->Allocate();
5598 __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
5599 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
5600 non_function_constructor.Branch(not_equal);
5601 }
5602
5603 // The map register now contains the constructor function. Grab the
5604 // instance class name from there.
5605 __ mov(obj.reg(),
5606 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
5607 __ mov(obj.reg(),
5608 FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
5609 frame_->Push(&obj);
5610 leave.Jump();
5611
5612 // Functions have class 'Function'.
5613 function.Bind();
5614 frame_->Push(Factory::function_class_symbol());
5615 leave.Jump();
5616
5617 // Objects with a non-function constructor have class 'Object'.
5618 non_function_constructor.Bind();
5619 frame_->Push(Factory::Object_symbol());
5620 leave.Jump();
5621
5622 // Non-JS objects have class null.
5623 null.Bind();
5624 frame_->Push(Factory::null_value());
5625
5626 // All done.
5627 leave.Bind();
5628 }
5629
5630
5631 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
5632 ASSERT(args->length() == 1);
5633 JumpTarget leave;
5634 Load(args->at(0)); // Load the object.
5635 frame_->Dup();
5636 Result object = frame_->Pop();
5637 object.ToRegister();
5638 ASSERT(object.is_valid());
5639 // if (object->IsSmi()) return object.
5640 __ test(object.reg(), Immediate(kSmiTagMask));
5641 leave.Branch(zero, taken);
5642 // It is a heap object - get map.
5643 Result temp = allocator()->Allocate();
5644 ASSERT(temp.is_valid());
5645 // if (!object->IsJSValue()) return object.
5646 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
5647 leave.Branch(not_equal, not_taken);
5648 __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
5649 object.Unuse();
5650 frame_->SetElementAt(0, &temp);
5651 leave.Bind();
5652 }
5653
5654
5655 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
5656 ASSERT(args->length() == 2);
5657 JumpTarget leave;
5658 Load(args->at(0)); // Load the object.
5659 Load(args->at(1)); // Load the value.
5660 Result value = frame_->Pop();
5661 Result object = frame_->Pop();
5662 value.ToRegister();
5663 object.ToRegister();
5664
5665 // if (object->IsSmi()) return value.
5666 __ test(object.reg(), Immediate(kSmiTagMask));
5667 leave.Branch(zero, &value, taken);
5668
5669 // It is a heap object - get its map.
5670 Result scratch = allocator_->Allocate();
5671 ASSERT(scratch.is_valid());
5672 // if (!object->IsJSValue()) return value.
5673 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
5674 leave.Branch(not_equal, &value, not_taken);
5675
5676 // Store the value.
5677 __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
5678 // Update the write barrier. Save the value as it will be
5679 // overwritten by the write barrier code and is needed afterward.
5680 Result duplicate_value = allocator_->Allocate();
5681 ASSERT(duplicate_value.is_valid());
5682 __ mov(duplicate_value.reg(), value.reg());
5683 // The object register is also overwritten by the write barrier and
5684 // possibly aliased in the frame.
5685 frame_->Spill(object.reg());
5686 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
5687 scratch.reg());
5688 object.Unuse();
5689 scratch.Unuse();
5690 duplicate_value.Unuse();
5691
5692 // Leave.
5693 leave.Bind(&value);
5694 frame_->Push(&value);
5695 }
5696
5697
5698 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
5699 ASSERT(args->length() == 1);
5700
5701 // ArgumentsAccessStub expects the key in edx and the formal
5702 // parameter count in eax.
5703 Load(args->at(0));
5704 Result key = frame_->Pop();
5705 // Explicitly create a constant result.
5706 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
5707 // Call the shared stub to get to arguments[key].
5708 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
5709 Result result = frame_->CallStub(&stub, &key, &count);
5710 frame_->Push(&result);
5711 }
5712
5713
5714 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
5715 ASSERT(args->length() == 2);
5716
5717 // Load the two objects into registers and perform the comparison.
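// Note that this is a raw pointer comparison: the result is true only if
// both arguments are the same heap object or the same smi.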
5718 Load(args->at(0));
5719 Load(args->at(1));
5720 Result right = frame_->Pop();
5721 Result left = frame_->Pop();
5722 right.ToRegister();
5723 left.ToRegister();
5724 __ cmp(right.reg(), Operand(left.reg()));
5725 right.Unuse();
5726 left.Unuse();
5727 destination()->Split(equal);
5728 }
5729
5730
5731 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
5732 ASSERT(args->length() == 0);
5733 ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
5734 Result ebp_as_smi = allocator_->Allocate();
5735 ASSERT(ebp_as_smi.is_valid());
5736 __ mov(ebp_as_smi.reg(), Operand(ebp));
5737 frame_->Push(&ebp_as_smi);
5738 }
5739
5740
5741 void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
5742 ASSERT(args->length() == 0);
5743 frame_->SpillAll();
5744
5745 // Make sure the frame is aligned like the OS expects.
5746 static const int kFrameAlignment = OS::ActivationFrameAlignment();
5747 if (kFrameAlignment > 0) {
5748 ASSERT(IsPowerOf2(kFrameAlignment));
5749 __ mov(edi, Operand(esp)); // Save in callee-saved register.
5750 __ and_(esp, -kFrameAlignment);
5751 }
5752
5753 // Call V8::RandomPositiveSmi().
5754 __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
5755
5756 // Restore stack pointer from callee-saved register edi.
5757 if (kFrameAlignment > 0) {
5758 __ mov(esp, Operand(edi));
5759 }
5760
5761 Result result = allocator_->Allocate(eax);
5762 frame_->Push(&result);
5763 }
5764
5765
5766 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
5767 ASSERT_EQ(2, args->length());
5768
5769 Load(args->at(0));
5770 Load(args->at(1));
5771
5772 StringAddStub stub(NO_STRING_ADD_FLAGS);
5773 Result answer = frame_->CallStub(&stub, 2);
5774 frame_->Push(&answer);
5775 }
5776
5777
5778 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
5779 ASSERT_EQ(3, args->length());
5780
5781 Load(args->at(0));
5782 Load(args->at(1));
5783 Load(args->at(2));
5784
5785 SubStringStub stub;
5786 Result answer = frame_->CallStub(&stub, 3);
5787 frame_->Push(&answer);
5788 }
5789
5790
5791 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
5792 ASSERT_EQ(2, args->length());
5793
5794 Load(args->at(0));
5795 Load(args->at(1));
5796
5797 StringCompareStub stub;
5798 Result answer = frame_->CallStub(&stub, 2);
5799 frame_->Push(&answer);
5800 }
5801
5802
5803 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
5804 ASSERT_EQ(args->length(), 4);
5805
5806 // Load the arguments on the stack and call the stub.
5807 Load(args->at(0));
5808 Load(args->at(1));
5809 Load(args->at(2));
5810 Load(args->at(3));
5811 RegExpExecStub stub;
5812 Result result = frame_->CallStub(&stub, 4);
5813 frame_->Push(&result);
5814 }
5815
5816
5817 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
5818 ASSERT_EQ(args->length(), 1);
5819
5820 // Load the argument on the stack and call the stub.
5821 Load(args->at(0));
5822 NumberToStringStub stub;
5823 Result result = frame_->CallStub(&stub, 1);
5824 frame_->Push(&result);
5825 }
5826
5827
5828 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
5829 ASSERT_EQ(args->length(), 1);
5830 Load(args->at(0));
5831 TranscendentalCacheStub stub(TranscendentalCache::SIN);
5832 Result result = frame_->CallStub(&stub, 1);
5833 frame_->Push(&result);
5834 }
5835
5836
5837 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
5838 ASSERT_EQ(args->length(), 1);
5839 Load(args->at(0));
5840 TranscendentalCacheStub stub(TranscendentalCache::COS);
5841 Result result = frame_->CallStub(&stub, 1);
5842 frame_->Push(&result);
5843 }
5844
5845
5846 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
5847 if (CheckForInlineRuntimeCall(node)) {
5848 return;
5849 }
5850
5851 ZoneList<Expression*>* args = node->arguments();
5852 Comment cmnt(masm_, "[ CallRuntime");
5853 Runtime::Function* function = node->function();
5854
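// A NULL function means the name does not refer to a C++ runtime function.
// Such calls are dispatched to a JS builtin looked up on the builtins object
// of the current global object.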
5855 if (function == NULL) {
5856 // Push the builtins object found in the current global object.
5857 Result temp = allocator()->Allocate();
5858 ASSERT(temp.is_valid());
5859 __ mov(temp.reg(), GlobalObject());
5860 __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
5861 frame_->Push(&temp);
5862 }
5863
5864 // Push the arguments ("left-to-right").
5865 int arg_count = args->length();
5866 for (int i = 0; i < arg_count; i++) {
5867 Load(args->at(i));
5868 }
5869
5870 if (function == NULL) {
5871 // Call the JS runtime function.
5872 frame_->Push(node->name());
5873 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
5874 arg_count,
5875 loop_nesting_);
5876 frame_->RestoreContextRegister();
5877 frame_->Push(&answer);
5878 } else {
5879 // Call the C runtime function.
5880 Result answer = frame_->CallRuntime(function, arg_count);
5881 frame_->Push(&answer);
5882 }
5883 }
5884
5885
5886 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
5887 Comment cmnt(masm_, "[ UnaryOperation");
5888
5889 Token::Value op = node->op();
5890
5891 if (op == Token::NOT) {
5892 // Swap the true and false targets but keep the same actual label
5893 // as the fall through.
5894 destination()->Invert();
5895 LoadCondition(node->expression(), destination(), true);
5896 // Swap the labels back.
5897 destination()->Invert();
5898
5899 } else if (op == Token::DELETE) {
5900 Property* property = node->expression()->AsProperty();
5901 if (property != NULL) {
5902 Load(property->obj());
5903 Load(property->key());
5904 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
5905 frame_->Push(&answer);
5906 return;
5907 }
5908
5909 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
5910 if (variable != NULL) {
5911 Slot* slot = variable->slot();
5912 if (variable->is_global()) {
5913 LoadGlobal();
5914 frame_->Push(variable->name());
5915 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
5916 CALL_FUNCTION, 2);
5917 frame_->Push(&answer);
5918 return;
5919
5920 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
5921 // Call the runtime to look up the context holding the named
5922 // variable. Sync the virtual frame eagerly so we can push the
5923 // arguments directly into place.
5924 frame_->SyncRange(0, frame_->element_count() - 1);
5925 frame_->EmitPush(esi);
5926 frame_->EmitPush(Immediate(variable->name()));
5927 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
5928 ASSERT(context.is_register());
5929 frame_->EmitPush(context.reg());
5930 context.Unuse();
5931 frame_->EmitPush(Immediate(variable->name()));
5932 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
5933 CALL_FUNCTION, 2);
5934 frame_->Push(&answer);
5935 return;
5936 }
5937
5938 // Default: Result of deleting non-global, not dynamically
5939 // introduced variables is false.
5940 frame_->Push(Factory::false_value());
5941
5942 } else {
5943 // Default: Result of deleting expressions is true.
5944 Load(node->expression()); // may have side-effects
5945 frame_->SetElementAt(0, Factory::true_value());
5946 }
5947
5948 } else if (op == Token::TYPEOF) {
5949 // Special case for loading the typeof expression; see comment on
5950 // LoadTypeofExpression().
5951 LoadTypeofExpression(node->expression());
5952 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
5953 frame_->Push(&answer);
5954
5955 } else if (op == Token::VOID) {
5956 Expression* expression = node->expression();
5957 if (expression && expression->AsLiteral() && (
5958 expression->AsLiteral()->IsTrue() ||
5959 expression->AsLiteral()->IsFalse() ||
5960 expression->AsLiteral()->handle()->IsNumber() ||
5961 expression->AsLiteral()->handle()->IsString() ||
5962 expression->AsLiteral()->handle()->IsJSRegExp() ||
5963 expression->AsLiteral()->IsNull())) {
5964 // Omit evaluating the value of the primitive literal.
5965 // It will be discarded anyway, and can have no side effect.
5966 frame_->Push(Factory::undefined_value());
5967 } else {
5968 Load(node->expression());
5969 frame_->SetElementAt(0, Factory::undefined_value());
5970 }
5971
5972 } else {
5973 Load(node->expression());
5974 bool overwrite =
5975 (node->expression()->AsBinaryOperation() != NULL &&
5976 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
5977 switch (op) {
5978 case Token::SUB: {
5979 GenericUnaryOpStub stub(Token::SUB, overwrite);
5980 Result operand = frame_->Pop();
5981 Result answer = frame_->CallStub(&stub, &operand);
5982 frame_->Push(&answer);
5983 break;
5984 }
5985
5986 case Token::BIT_NOT: {
5987 // Smi check.
5988 JumpTarget smi_label;
5989 JumpTarget continue_label;
5990 Result operand = frame_->Pop();
5991 operand.ToRegister();
5992 __ test(operand.reg(), Immediate(kSmiTagMask));
5993 smi_label.Branch(zero, &operand, taken);
5994
5995 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
5996 Result answer = frame_->CallStub(&stub, &operand);
5997 continue_label.Jump(&answer);
5998
5999 smi_label.Bind(&answer);
6000 answer.ToRegister();
6001 frame_->Spill(answer.reg());
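// For a smi the tag bit is zero, so bitwise NOT sets it; clearing the tag
// bit again yields the correctly tagged smi representing ~value.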
6002 __ not_(answer.reg());
6003 __ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
6004
6005 continue_label.Bind(&answer);
6006 frame_->Push(&answer);
6007 break;
6008 }
6009
6010 case Token::ADD: {
6011 // Smi check.
6012 JumpTarget continue_label;
6013 Result operand = frame_->Pop();
6014 operand.ToRegister();
6015 __ test(operand.reg(), Immediate(kSmiTagMask));
6016 continue_label.Branch(zero, &operand, taken);
6017
6018 frame_->Push(&operand);
6019 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
6020 CALL_FUNCTION, 1);
6021
6022 continue_label.Bind(&answer);
6023 frame_->Push(&answer);
6024 break;
6025 }
6026
6027 default:
6028 // NOT, DELETE, TYPEOF, and VOID are handled outside the
6029 // switch.
6030 UNREACHABLE();
6031 }
6032 }
6033 }
6034
6035
6036 // The value in dst was optimistically incremented or decremented. The
6037 // result overflowed or was not smi tagged. Undo the operation, call
6038 // into the runtime to convert the argument to a number, and call the
6039 // specialized add or subtract stub. The result is left in dst.
6040 class DeferredPrefixCountOperation: public DeferredCode {
6041 public:
6042 DeferredPrefixCountOperation(Register dst, bool is_increment)
6043 : dst_(dst), is_increment_(is_increment) {
6044 set_comment("[ DeferredCountOperation");
6045 }
6046
6047 virtual void Generate();
6048
6049 private:
6050 Register dst_;
6051 bool is_increment_;
6052 };
6053
6054
6055 void DeferredPrefixCountOperation::Generate() {
6056 // Undo the optimistic smi operation.
6057 if (is_increment_) {
6058 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
6059 } else {
6060 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
6061 }
6062 __ push(dst_);
6063 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
6064 __ push(eax);
6065 __ push(Immediate(Smi::FromInt(1)));
6066 if (is_increment_) {
6067 __ CallRuntime(Runtime::kNumberAdd, 2);
6068 } else {
6069 __ CallRuntime(Runtime::kNumberSub, 2);
6070 }
6071 if (!dst_.is(eax)) __ mov(dst_, eax);
6072 }
6073
6074
6075 // The value in dst was optimistically incremented or decremented. The
6076 // result overflowed or was not smi tagged. Undo the operation and call
6077 // into the runtime to convert the argument to a number. Update the
6078 // original value in old. Call the specialized add or subtract stub.
6079 // The result is left in dst.
6080 class DeferredPostfixCountOperation: public DeferredCode {
6081 public:
6082 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
6083 : dst_(dst), old_(old), is_increment_(is_increment) {
6084 set_comment("[ DeferredCountOperation");
6085 }
6086
6087 virtual void Generate();
6088
6089 private:
6090 Register dst_;
6091 Register old_;
6092 bool is_increment_;
6093 };
6094
6095
6096 void DeferredPostfixCountOperation::Generate() {
6097 // Undo the optimistic smi operation.
6098 if (is_increment_) {
6099 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
6100 } else {
6101 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
6102 }
6103 __ push(dst_);
6104 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
6105
6106 // Save the result of ToNumber to use as the old value.
6107 __ push(eax);
6108
6109 // Call the runtime for the addition or subtraction.
6110 __ push(eax);
6111 __ push(Immediate(Smi::FromInt(1)));
6112 if (is_increment_) {
6113 __ CallRuntime(Runtime::kNumberAdd, 2);
6114 } else {
6115 __ CallRuntime(Runtime::kNumberSub, 2);
6116 }
6117 if (!dst_.is(eax)) __ mov(dst_, eax);
6118 __ pop(old_);
6119 }
6120
6121
6122 void CodeGenerator::VisitCountOperation(CountOperation* node) {
6123 Comment cmnt(masm_, "[ CountOperation");
6124
6125 bool is_postfix = node->is_postfix();
6126 bool is_increment = node->op() == Token::INC;
6127
6128 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
6129 bool is_const = (var != NULL && var->mode() == Variable::CONST);
6130
6131 // Postfix operations need a stack slot under the reference to hold
6132 // the old value while the new value is being stored. This is so that
6133 // in the case that storing the new value requires a call, the old
6134 // value will be in the frame to be spilled.
6135 if (is_postfix) frame_->Push(Smi::FromInt(0));
6136
6137 // A constant reference is not saved to, so a constant reference is not a
6138 // compound assignment reference.
6139 { Reference target(this, node->expression(), !is_const);
6140 if (target.is_illegal()) {
6141 // Spoof the virtual frame to have the expected height (one higher
6142 // than on entry).
6143 if (!is_postfix) frame_->Push(Smi::FromInt(0));
6144 return;
6145 }
6146 target.TakeValue();
6147
6148 Result new_value = frame_->Pop();
6149 new_value.ToRegister();
6150
6151 Result old_value; // Only allocated in the postfix case.
6152 if (is_postfix) {
6153 // Allocate a temporary to preserve the old value.
6154 old_value = allocator_->Allocate();
6155 ASSERT(old_value.is_valid());
6156 __ mov(old_value.reg(), new_value.reg());
6157 }
6158 // Ensure the new value is writable.
6159 frame_->Spill(new_value.reg());
6160
6161 // In order to combine the overflow and the smi tag check, we need
6162 // to be able to allocate a byte register. We attempt to do so
6163 // without spilling. If we fail, we will generate separate overflow
6164 // and smi tag checks.
6165 //
6166 // We allocate and clear the temporary byte register before
6167 // performing the count operation since clearing the register using
6168 // xor will clear the overflow flag.
6169 Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
6170 if (tmp.is_valid()) {
6171 __ Set(tmp.reg(), Immediate(0));
6172 }
6173
6174 DeferredCode* deferred = NULL;
6175 if (is_postfix) {
6176 deferred = new DeferredPostfixCountOperation(new_value.reg(),
6177 old_value.reg(),
6178 is_increment);
6179 } else {
6180 deferred = new DeferredPrefixCountOperation(new_value.reg(),
6181 is_increment);
6182 }
6183
6184 if (is_increment) {
6185 __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
6186 } else {
6187 __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
6188 }
6189
6190 // If the count operation didn't overflow and the result is a valid
6191 // smi, we're done. Otherwise, we jump to the deferred slow-case
6192 // code.
6193 if (tmp.is_valid()) {
6194 // We combine the overflow and the smi tag check if we could
6195 // successfully allocate a temporary byte register.
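// setcc stores 1 in the byte register if the operation overflowed; or-ing it
// into the result makes the smi tag test below fail either on overflow or on
// a non-smi result.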
6196 __ setcc(overflow, tmp.reg());
6197 __ or_(Operand(tmp.reg()), new_value.reg());
6198 __ test(tmp.reg(), Immediate(kSmiTagMask));
6199 tmp.Unuse();
6200 deferred->Branch(not_zero);
6201 } else {
6202 // Otherwise we test separately for overflow and smi tag.
6203 deferred->Branch(overflow);
6204 __ test(new_value.reg(), Immediate(kSmiTagMask));
6205 deferred->Branch(not_zero);
6206 }
6207 deferred->BindExit();
6208
6209 // Postfix: store the old value in the allocated slot under the
6210 // reference.
6211 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
6212
6213 frame_->Push(&new_value);
6214 // Non-constant: update the reference.
6215 if (!is_const) target.SetValue(NOT_CONST_INIT);
6216 }
6217
6218 // Postfix: drop the new value and use the old.
6219 if (is_postfix) frame_->Drop();
6220 }
6221
6222
6223 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
6224 Comment cmnt(masm_, "[ BinaryOperation");
6225 Token::Value op = node->op();
6226
6227 // According to ECMA-262 section 11.11, page 58, the binary logical
6228 // operators must yield the result of one of the two expressions
6229 // before any ToBoolean() conversions. This means that the value
6230 // produced by a && or || operator is not necessarily a boolean.
6231
6232 // NOTE: If the left hand side produces a materialized value (not
6233 // control flow), we force the right hand side to do the same. This
6234 // is necessary because we assume that if we get control flow on the
6235 // last path out of an expression we got it on all paths.
6236 if (op == Token::AND) {
6237 JumpTarget is_true;
6238 ControlDestination dest(&is_true, destination()->false_target(), true);
6239 LoadCondition(node->left(), &dest, false);
6240
6241 if (dest.false_was_fall_through()) {
6242 // The current false target was used as the fall-through. If
6243 // there are no dangling jumps to is_true then the left
6244 // subexpression was unconditionally false. Otherwise we have
6245 // paths where we do have to evaluate the right subexpression.
6246 if (is_true.is_linked()) {
6247 // We need to compile the right subexpression. If the jump to
6248 // the current false target was a forward jump then we have a
6249 // valid frame, we have just bound the false target, and we
6250 // have to jump around the code for the right subexpression.
6251 if (has_valid_frame()) {
6252 destination()->false_target()->Unuse();
6253 destination()->false_target()->Jump();
6254 }
6255 is_true.Bind();
6256 // The left subexpression compiled to control flow, so the
6257 // right one is free to do so as well.
6258 LoadCondition(node->right(), destination(), false);
6259 } else {
6260 // We have actually just jumped to or bound the current false
6261 // target but the current control destination is not marked as
6262 // used.
6263 destination()->Use(false);
6264 }
6265
6266 } else if (dest.is_used()) {
6267 // The left subexpression compiled to control flow (and is_true
6268 // was just bound), so the right is free to do so as well.
6269 LoadCondition(node->right(), destination(), false);
6270
6271 } else {
6272 // We have a materialized value on the frame, so we exit with
6273 // one on all paths. There are possibly also jumps to is_true
6274 // from nested subexpressions.
6275 JumpTarget pop_and_continue;
6276 JumpTarget exit;
6277
6278 // Avoid popping the result if it converts to 'false' using the
6279 // standard ToBoolean() conversion as described in ECMA-262,
6280 // section 9.2, page 30.
6281 //
6282 // Duplicate the TOS value. The duplicate will be popped by
6283 // ToBoolean.
6284 frame_->Dup();
6285 ControlDestination dest(&pop_and_continue, &exit, true);
6286 ToBoolean(&dest);
6287
6288 // Pop the result of evaluating the first part.
6289 frame_->Drop();
6290
6291 // Compile right side expression.
6292 is_true.Bind();
6293 Load(node->right());
6294
6295 // Exit (always with a materialized value).
6296 exit.Bind();
6297 }
6298
6299 } else if (op == Token::OR) {
6300 JumpTarget is_false;
6301 ControlDestination dest(destination()->true_target(), &is_false, false);
6302 LoadCondition(node->left(), &dest, false);
6303
6304 if (dest.true_was_fall_through()) {
6305 // The current true target was used as the fall-through. If
6306 // there are no dangling jumps to is_false then the left
6307 // subexpression was unconditionally true. Otherwise we have
6308 // paths where we do have to evaluate the right subexpression.
6309 if (is_false.is_linked()) {
6310 // We need to compile the right subexpression. If the jump to
6311 // the current true target was a forward jump then we have a
6312 // valid frame, we have just bound the true target, and we
6313 // have to jump around the code for the right subexpression.
6314 if (has_valid_frame()) {
6315 destination()->true_target()->Unuse();
6316 destination()->true_target()->Jump();
6317 }
6318 is_false.Bind();
6319 // The left subexpression compiled to control flow, so the
6320 // right one is free to do so as well.
6321 LoadCondition(node->right(), destination(), false);
6322 } else {
6323 // We have just jumped to or bound the current true target but
6324 // the current control destination is not marked as used.
6325 destination()->Use(true);
6326 }
6327
6328 } else if (dest.is_used()) {
6329 // The left subexpression compiled to control flow (and is_false
6330 // was just bound), so the right is free to do so as well.
6331 LoadCondition(node->right(), destination(), false);
6332
6333 } else {
6334 // We have a materialized value on the frame, so we exit with
6335 // one on all paths. There are possibly also jumps to is_false
6336 // from nested subexpressions.
6337 JumpTarget pop_and_continue;
6338 JumpTarget exit;
6339
6340 // Avoid popping the result if it converts to 'true' using the
6341 // standard ToBoolean() conversion as described in ECMA-262,
6342 // section 9.2, page 30.
6343 //
6344 // Duplicate the TOS value. The duplicate will be popped by
6345 // ToBoolean.
6346 frame_->Dup();
6347 ControlDestination dest(&exit, &pop_and_continue, false);
6348 ToBoolean(&dest);
6349
6350 // Pop the result of evaluating the first part.
6351 frame_->Drop();
6352
6353 // Compile right side expression.
6354 is_false.Bind();
6355 Load(node->right());
6356
6357 // Exit (always with a materialized value).
6358 exit.Bind();
6359 }
6360
6361 } else {
6362 // NOTE: The code below assumes that the slow cases (calls to runtime)
6363 // never return a constant/immutable object.
6364 OverwriteMode overwrite_mode = NO_OVERWRITE;
6365 if (node->left()->AsBinaryOperation() != NULL &&
6366 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
6367 overwrite_mode = OVERWRITE_LEFT;
6368 } else if (node->right()->AsBinaryOperation() != NULL &&
6369 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
6370 overwrite_mode = OVERWRITE_RIGHT;
6371 }
6372
6373 Load(node->left());
6374 Load(node->right());
6375 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
6376 }
6377 }
6378
6379
6380 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
6381 frame_->PushFunction();
6382 }
6383
6384
6385 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
6386 Comment cmnt(masm_, "[ CompareOperation");
6387
6388 bool left_already_loaded = false;
6389
6390 // Get the expressions from the node.
6391 Expression* left = node->left();
6392 Expression* right = node->right();
6393 Token::Value op = node->op();
6394 // To make typeof testing for natives implemented in JavaScript really
6395 // efficient, we generate special code for expressions of the form:
6396 // 'typeof <expression> == <string>'.
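// For instance, (typeof x == 'number') is compiled below into a smi
// check plus a heap-number map check, without ever materializing the
// type string.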
6397 UnaryOperation* operation = left->AsUnaryOperation();
6398 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
6399 (operation != NULL && operation->op() == Token::TYPEOF) &&
6400 (right->AsLiteral() != NULL &&
6401 right->AsLiteral()->handle()->IsString())) {
6402 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
6403
6404 // Load the operand and move it to a register.
6405 LoadTypeofExpression(operation->expression());
6406 Result answer = frame_->Pop();
6407 answer.ToRegister();
6408
6409 if (check->Equals(Heap::number_symbol())) {
6410 __ test(answer.reg(), Immediate(kSmiTagMask));
6411 destination()->true_target()->Branch(zero);
6412 frame_->Spill(answer.reg());
6413 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
6414 __ cmp(answer.reg(), Factory::heap_number_map());
6415 answer.Unuse();
6416 destination()->Split(equal);
6417
6418 } else if (check->Equals(Heap::string_symbol())) {
6419 __ test(answer.reg(), Immediate(kSmiTagMask));
6420 destination()->false_target()->Branch(zero);
6421
6422 // It can be an undetectable string object.
6423 Result temp = allocator()->Allocate();
6424 ASSERT(temp.is_valid());
6425 __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
6426 __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
6427 __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
6428 destination()->false_target()->Branch(not_zero);
6429 __ CmpObjectType(answer.reg(), FIRST_NONSTRING_TYPE, temp.reg());
6430 temp.Unuse();
6431 answer.Unuse();
6432 destination()->Split(below);
6433
6434 } else if (check->Equals(Heap::boolean_symbol())) {
6435 __ cmp(answer.reg(), Factory::true_value());
6436 destination()->true_target()->Branch(equal);
6437 __ cmp(answer.reg(), Factory::false_value());
6438 answer.Unuse();
6439 destination()->Split(equal);
6440
6441 } else if (check->Equals(Heap::undefined_symbol())) {
6442 __ cmp(answer.reg(), Factory::undefined_value());
6443 destination()->true_target()->Branch(equal);
6444
6445 __ test(answer.reg(), Immediate(kSmiTagMask));
6446 destination()->false_target()->Branch(zero);
6447
6448 // It can be an undetectable object.
6449 frame_->Spill(answer.reg());
6450 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
6451 __ movzx_b(answer.reg(),
6452 FieldOperand(answer.reg(), Map::kBitFieldOffset));
6453 __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
6454 answer.Unuse();
6455 destination()->Split(not_zero);
6456
6457 } else if (check->Equals(Heap::function_symbol())) {
6458 __ test(answer.reg(), Immediate(kSmiTagMask));
6459 destination()->false_target()->Branch(zero);
6460 frame_->Spill(answer.reg());
6461 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
6462 destination()->true_target()->Branch(equal);
6463 // Regular expressions are callable so typeof == 'function'.
6464 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
6465 answer.Unuse();
6466 destination()->Split(equal);
6467 } else if (check->Equals(Heap::object_symbol())) {
6468 __ test(answer.reg(), Immediate(kSmiTagMask));
6469 destination()->false_target()->Branch(zero);
6470 __ cmp(answer.reg(), Factory::null_value());
6471 destination()->true_target()->Branch(equal);
6472
6473 Result map = allocator()->Allocate();
6474 ASSERT(map.is_valid());
6475 // Regular expressions are typeof == 'function', not 'object'.
6476 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
6477 destination()->false_target()->Branch(equal);
6478
6479 // It can be an undetectable object.
6480 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
6481 __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
6482 destination()->false_target()->Branch(not_zero);
6483 __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
6484 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
6485 __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
6486 destination()->false_target()->Branch(less);
6487 __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
6488 answer.Unuse();
6489 map.Unuse();
6490 destination()->Split(less_equal);
6491 } else {
6492 // Uncommon case: typeof testing against a string literal that is
6493 // never returned from the typeof operator.
6494 answer.Unuse();
6495 destination()->Goto(false);
6496 }
6497 return;
6498 } else if (op == Token::LT &&
6499 right->AsLiteral() != NULL &&
6500 right->AsLiteral()->handle()->IsHeapNumber()) {
6501 Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
6502 if (check->value() == 2147483648.0) { // 0x80000000.
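// Smis hold at most 31 bits on ia32, so every smi is below 2^31; for
// heap numbers the sign/exponent word alone decides the comparison,
// which is what the code below inspects.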
6503 Load(left);
6504 left_already_loaded = true;
6505 Result lhs = frame_->Pop();
6506 lhs.ToRegister();
6507 __ test(lhs.reg(), Immediate(kSmiTagMask));
6508 destination()->true_target()->Branch(zero); // All Smis are less.
6509 Result scratch = allocator()->Allocate();
6510 ASSERT(scratch.is_valid());
6511 __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
6512 __ cmp(scratch.reg(), Factory::heap_number_map());
6513 JumpTarget not_a_number;
6514 not_a_number.Branch(not_equal, &lhs);
6515 __ mov(scratch.reg(),
6516 FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
6517 __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
6518 not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
6519 const uint32_t borderline_exponent =
6520 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
6521 __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
6522 scratch.Unuse();
6523 lhs.Unuse();
6524 destination()->true_target()->Branch(less);
6525 destination()->false_target()->Jump();
6526
6527 not_a_number.Bind(&lhs);
6528 frame_->Push(&lhs);
6529 }
6530 }
6531
6532 Condition cc = no_condition;
6533 bool strict = false;
6534 switch (op) {
6535 case Token::EQ_STRICT:
6536 strict = true;
6537 // Fall through
6538 case Token::EQ:
6539 cc = equal;
6540 break;
6541 case Token::LT:
6542 cc = less;
6543 break;
6544 case Token::GT:
6545 cc = greater;
6546 break;
6547 case Token::LTE:
6548 cc = less_equal;
6549 break;
6550 case Token::GTE:
6551 cc = greater_equal;
6552 break;
6553 case Token::IN: {
6554 if (!left_already_loaded) Load(left);
6555 Load(right);
6556 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
6557 frame_->Push(&answer); // push the result
6558 return;
6559 }
6560 case Token::INSTANCEOF: {
6561 if (!left_already_loaded) Load(left);
6562 Load(right);
6563 InstanceofStub stub;
6564 Result answer = frame_->CallStub(&stub, 2);
6565 answer.ToRegister();
6566 __ test(answer.reg(), Operand(answer.reg()));
6567 answer.Unuse();
6568 destination()->Split(zero);
6569 return;
6570 }
6571 default:
6572 UNREACHABLE();
6573 }
6574 if (!left_already_loaded) Load(left);
6575 Load(right);
6576 Comparison(node, cc, strict, destination());
6577 }
6578
6579
6580 #ifdef DEBUG
6581 bool CodeGenerator::HasValidEntryRegisters() {
6582 return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
6583 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
6584 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
6585 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
6586 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
6587 }
6588 #endif
6589
6590
6591 // Emit a LoadIC call to get the value from receiver and leave it in
6592 // dst.
6593 class DeferredReferenceGetNamedValue: public DeferredCode {
6594 public:
6595 DeferredReferenceGetNamedValue(Register dst,
6596 Register receiver,
6597 Handle<String> name)
6598 : dst_(dst), receiver_(receiver), name_(name) {
6599 set_comment("[ DeferredReferenceGetNamedValue");
6600 }
6601
6602 virtual void Generate();
6603
6604 Label* patch_site() { return &patch_site_; }
6605
6606 private:
6607 Label patch_site_;
6608 Register dst_;
6609 Register receiver_;
6610 Handle<String> name_;
6611 };
6612
6613
6614 void DeferredReferenceGetNamedValue::Generate() {
6615 if (!receiver_.is(eax)) {
6616 __ mov(eax, receiver_);
6617 }
6618 __ Set(ecx, Immediate(name_));
6619 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
6620 __ call(ic, RelocInfo::CODE_TARGET);
6621 // The call must be followed by a test eax instruction to indicate
6622 // that the inobject property case was inlined.
6623 //
6624 // Store the delta to the map check instruction here in the test
6625 // instruction. Use masm_-> instead of the __ macro since the
6626 // latter can't return a value.
6627 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
6628 // Here we use masm_-> instead of the __ macro because this is the
6629 // instruction that gets patched and coverage code gets in the way.
6630 masm_->test(eax, Immediate(-delta_to_patch_site));
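// The IC code reads this immediate to locate the inlined map-check
// cmp relative to the call's return address and patch it (see the
// corresponding comments in EmitNamedLoad).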
6631 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
6632
6633 if (!dst_.is(eax)) __ mov(dst_, eax);
6634 }
6635
6636
6637 class DeferredReferenceGetKeyedValue: public DeferredCode {
6638 public:
6639 explicit DeferredReferenceGetKeyedValue(Register dst,
6640 Register receiver,
6641 Register key)
6642 : dst_(dst), receiver_(receiver), key_(key) {
6643 set_comment("[ DeferredReferenceGetKeyedValue");
6644 }
6645
6646 virtual void Generate();
6647
6648 Label* patch_site() { return &patch_site_; }
6649
6650 private:
6651 Label patch_site_;
6652 Register dst_;
6653 Register receiver_;
6654 Register key_;
6655 };
6656
6657
6658 void DeferredReferenceGetKeyedValue::Generate() {
6659 if (!receiver_.is(eax)) {
6660 // Register eax is available for key.
6661 if (!key_.is(eax)) {
6662 __ mov(eax, key_);
6663 }
6664 if (!receiver_.is(edx)) {
6665 __ mov(edx, receiver_);
6666 }
6667 } else if (!key_.is(edx)) {
6668 // Register edx is available for receiver.
6669 if (!receiver_.is(edx)) {
6670 __ mov(edx, receiver_);
6671 }
6672 if (!key_.is(eax)) {
6673 __ mov(eax, key_);
6674 }
6675 } else {
6676 __ xchg(edx, eax);
6677 }
6678 // Calculate the delta from the IC call instruction to the map check
6679 // cmp instruction in the inlined version. This delta is stored in
6680 // a test(eax, delta) instruction after the call so that we can find
6681 // it in the IC initialization code and patch the cmp instruction.
6682 // This means that we cannot allow test instructions after calls to
6683 // KeyedLoadIC stubs in other places.
6684 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
6685 __ call(ic, RelocInfo::CODE_TARGET);
6686 // The delta from the start of the map-compare instruction to the
6687 // test instruction. We use masm_-> directly here instead of the __
6688 // macro because the macro sometimes uses macro expansion to turn
6689 // into something that can't return a value. This is encountered
6690 // when doing generated code coverage tests.
6691 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
6692 // Here we use masm_-> instead of the __ macro because this is the
6693 // instruction that gets patched and coverage code gets in the way.
6694 masm_->test(eax, Immediate(-delta_to_patch_site));
6695 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
6696
6697 if (!dst_.is(eax)) __ mov(dst_, eax);
6698 }
6699
6700
6701 class DeferredReferenceSetKeyedValue: public DeferredCode {
6702 public:
6703 DeferredReferenceSetKeyedValue(Register value,
6704 Register key,
6705 Register receiver)
6706 : value_(value), key_(key), receiver_(receiver) {
6707 set_comment("[ DeferredReferenceSetKeyedValue");
6708 }
6709
6710 virtual void Generate();
6711
6712 Label* patch_site() { return &patch_site_; }
6713
6714 private:
6715 Register value_;
6716 Register key_;
6717 Register receiver_;
6718 Label patch_site_;
6719 };
6720
6721
6722 void DeferredReferenceSetKeyedValue::Generate() {
6723 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
6724 // Push receiver and key arguments on the stack.
6725 __ push(receiver_);
6726 __ push(key_);
6727 // Move value argument to eax as expected by the IC stub.
6728 if (!value_.is(eax)) __ mov(eax, value_);
6729 // Call the IC stub.
6730 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
6731 __ call(ic, RelocInfo::CODE_TARGET);
6732 // The delta from the start of the map-compare instruction to the
6733 // test instruction. We use masm_-> directly here instead of the
6734 // __ macro because the macro sometimes uses macro expansion to turn
6735 // into something that can't return a value. This is encountered
6736 // when doing generated code coverage tests.
6737 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
6738 // Here we use masm_-> instead of the __ macro because this is the
6739 // instruction that gets patched and coverage code gets in the way.
6740 masm_->test(eax, Immediate(-delta_to_patch_site));
6741 // Restore value (returned from store IC), key and receiver
6742 // registers.
6743 if (!value_.is(eax)) __ mov(value_, eax);
6744 __ pop(key_);
6745 __ pop(receiver_);
6746 }
6747
6748
6749 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
6750 #ifdef DEBUG
6751 int original_height = frame()->height();
6752 #endif
6753 Result result;
6754 // Do not inline the inobject property case for loads from the global
6755 // object. Also do not inline for unoptimized code. This saves time in
6756 // the code generator. Unoptimized code is toplevel code or code that is
6757 // not in a loop.
6758 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6759 Comment cmnt(masm(), "[ Load from named Property");
6760 frame()->Push(name);
6761
6762 RelocInfo::Mode mode = is_contextual
6763 ? RelocInfo::CODE_TARGET_CONTEXT
6764 : RelocInfo::CODE_TARGET;
6765 result = frame()->CallLoadIC(mode);
6766 // A test eax instruction following the call signals that the inobject
6767 // property case was inlined. Ensure that there is not a test eax
6768 // instruction here.
6769 __ nop();
6770 } else {
6771 // Inline the inobject property case.
6772 Comment cmnt(masm(), "[ Inlined named property load");
6773 Result receiver = frame()->Pop();
6774 receiver.ToRegister();
6775
6776 result = allocator()->Allocate();
6777 ASSERT(result.is_valid());
6778 DeferredReferenceGetNamedValue* deferred =
6779 new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
6780
6781 // Check that the receiver is a heap object.
6782 __ test(receiver.reg(), Immediate(kSmiTagMask));
6783 deferred->Branch(zero);
6784
6785 __ bind(deferred->patch_site());
6786 // This is the map check instruction that will be patched (so we can't
6787 // use the double underscore macro that may insert instructions).
6788 // Initially use an invalid map to force a failure.
6789 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6790 Immediate(Factory::null_value()));
6791 // This branch is always a forwards branch so it's always a fixed size
6792 // which allows the assert below to succeed and patching to work.
6793 deferred->Branch(not_equal);
6794
6795 // The delta from the patch label to the load offset must be statically
6796 // known.
6797 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
6798 LoadIC::kOffsetToLoadInstruction);
6799 // The initial (invalid) offset has to be large enough to force a 32-bit
6800 // instruction encoding to allow patching with an arbitrary offset. Use
6801 // kMaxInt (minus kHeapObjectTag).
6802 int offset = kMaxInt;
6803 masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
6804
6805 __ IncrementCounter(&Counters::named_load_inline, 1);
6806 deferred->BindExit();
6807 }
6808 ASSERT(frame()->height() == original_height - 1);
6809 return result;
6810 }
6811
6812
6813 Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
6814 #ifdef DEBUG
6815 int expected_height = frame()->height() - (is_contextual ? 1 : 2);
6816 #endif
6817 Result result = frame()->CallStoreIC(name, is_contextual);
6818
6819 ASSERT_EQ(expected_height, frame()->height());
6820 return result;
6821 }
6822
6823
6824 Result CodeGenerator::EmitKeyedLoad() {
6825 #ifdef DEBUG
6826 int original_height = frame()->height();
6827 #endif
6828 Result result;
6829 // Inline array load code if inside of a loop. We do not know the
6830 // receiver map yet, so we initially generate the code with a check
6831 // against an invalid map. In the inline cache code, we patch the map
6832 // check if appropriate.
6833 if (loop_nesting() > 0) {
6834 Comment cmnt(masm_, "[ Inlined load from keyed Property");
6835
6836 Result key = frame_->Pop();
6837 Result receiver = frame_->Pop();
6838 key.ToRegister();
6839 receiver.ToRegister();
6840
6841 // Use a fresh temporary to load the elements without destroying
6842 // the receiver which is needed for the deferred slow case.
6843 Result elements = allocator()->Allocate();
6844 ASSERT(elements.is_valid());
6845
6846 // Use a fresh temporary for the index and later the loaded
6847 // value.
6848 result = allocator()->Allocate();
6849 ASSERT(result.is_valid());
6850
6851 DeferredReferenceGetKeyedValue* deferred =
6852 new DeferredReferenceGetKeyedValue(result.reg(),
6853 receiver.reg(),
6854 key.reg());
6855
6856 __ test(receiver.reg(), Immediate(kSmiTagMask));
6857 deferred->Branch(zero);
6858
6859 // Initially, use an invalid map. The map is patched in the IC
6860 // initialization code.
6861 __ bind(deferred->patch_site());
6862 // Use masm-> here instead of the double underscore macro since extra
6863 // coverage code can interfere with the patching.
6864 masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6865 Immediate(Factory::null_value()));
6866 deferred->Branch(not_equal);
6867
6868 // Check that the key is a smi.
6869 __ test(key.reg(), Immediate(kSmiTagMask));
6870 deferred->Branch(not_zero);
6871
6872 // Get the elements array from the receiver and check that it
6873 // is not a dictionary.
6874 __ mov(elements.reg(),
6875 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6876 __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
6877 Immediate(Factory::fixed_array_map()));
6878 deferred->Branch(not_equal);
6879
6880 // Shift the key to get the actual index value and check that
6881 // it is within bounds.
6882 __ mov(result.reg(), key.reg());
6883 __ SmiUntag(result.reg());
6884 __ cmp(result.reg(),
6885 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
6886 deferred->Branch(above_equal);
6887
6888 // Load and check that the result is not the hole.
6889 __ mov(result.reg(), Operand(elements.reg(),
6890 result.reg(),
6891 times_4,
6892 FixedArray::kHeaderSize - kHeapObjectTag));
6893 elements.Unuse();
6894 __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
6895 deferred->Branch(equal);
6896 __ IncrementCounter(&Counters::keyed_load_inline, 1);
6897
6898 deferred->BindExit();
6899 } else {
6900 Comment cmnt(masm_, "[ Load from keyed Property");
6901 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
6902 // Make sure that we do not have a test instruction after the
6903 // call. A test instruction after the call is used to
6904 // indicate that we have generated an inline version of the
6905 // keyed load. The explicit nop instruction is here because
6906 // the push that follows might be peep-hole optimized away.
6907 __ nop();
6908 }
6909 ASSERT(frame()->height() == original_height - 2);
6910 return result;
6911 }
6912
6913
6914 Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
6915 #ifdef DEBUG
6916 int original_height = frame()->height();
6917 #endif
6918 Result result;
6919 // Generate inlined version of the keyed store if the code is in a loop
6920 // and the key is likely to be a smi.
6921 if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
6922 Comment cmnt(masm(), "[ Inlined store to keyed Property");
6923
6924 // Get the receiver, key and value into registers.
6925 result = frame()->Pop();
6926 Result key = frame()->Pop();
6927 Result receiver = frame()->Pop();
6928
6929 Result tmp = allocator_->Allocate();
6930 ASSERT(tmp.is_valid());
6931
6932 // Determine whether the value is a constant before putting it in a
6933 // register.
6934 bool value_is_constant = result.is_constant();
6935
6936 // Make sure that value, key and receiver are in registers.
6937 result.ToRegister();
6938 key.ToRegister();
6939 receiver.ToRegister();
6940
6941 DeferredReferenceSetKeyedValue* deferred =
6942 new DeferredReferenceSetKeyedValue(result.reg(),
6943 key.reg(),
6944 receiver.reg());
6945
6946 // Check that the value is a smi if it is not a constant. We can skip
6947 // the write barrier for smis and constants.
6948 if (!value_is_constant) {
6949 __ test(result.reg(), Immediate(kSmiTagMask));
6950 deferred->Branch(not_zero);
6951 }
6952
6953 // Check that the key is a non-negative smi.
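// kSmiTagMask catches non-smis and bit 31 catches negative smis, so a
// single test covers both conditions.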
6954 __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
6955 deferred->Branch(not_zero);
6956
6957 // Check that the receiver is not a smi.
6958 __ test(receiver.reg(), Immediate(kSmiTagMask));
6959 deferred->Branch(zero);
6960
6961 // Check that the receiver is a JSArray.
6962 __ mov(tmp.reg(),
6963 FieldOperand(receiver.reg(), HeapObject::kMapOffset));
6964 __ movzx_b(tmp.reg(),
6965 FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
6966 __ cmp(tmp.reg(), JS_ARRAY_TYPE);
6967 deferred->Branch(not_equal);
6968
6969 // Check that the key is within bounds. Both the key and the length of
6970 // the JSArray are smis.
6971 __ cmp(key.reg(),
6972 FieldOperand(receiver.reg(), JSArray::kLengthOffset));
6973 deferred->Branch(greater_equal);
6974
6975 // Get the elements array from the receiver and check that it is not a
6976 // dictionary.
6977 __ mov(tmp.reg(),
6978 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6979 // Bind the deferred code patch site to be able to locate the fixed
6980 // array map comparison. When debugging, we patch this comparison to
6981 // always fail so that we will hit the IC call in the deferred code
6982 // which will allow the debugger to break for fast case stores.
6983 __ bind(deferred->patch_site());
6984 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6985 Immediate(Factory::fixed_array_map()));
6986 deferred->Branch(not_equal);
6987
6988 // Store the value.
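// The key is still a smi (value << 1), so times_2 scaling turns it
// into value * kPointerSize, the byte offset into the elements array.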
6989 __ mov(Operand(tmp.reg(),
6990 key.reg(),
6991 times_2,
6992 FixedArray::kHeaderSize - kHeapObjectTag),
6993 result.reg());
6994 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6995
6996 deferred->BindExit();
6997 } else {
6998 result = frame()->CallKeyedStoreIC();
6999 // Make sure that we do not have a test instruction after the
7000 // call. A test instruction after the call is used to
7001 // indicate that we have generated an inline version of the
7002 // keyed store.
7003 __ nop();
7004 frame()->Drop(2);
7005 }
7006 ASSERT(frame()->height() == original_height - 3);
7007 return result;
7008 }
7009
7010
7011 #undef __
7012 #define __ ACCESS_MASM(masm)
7013
7014
7015 Handle<String> Reference::GetName() {
7016 ASSERT(type_ == NAMED);
7017 Property* property = expression_->AsProperty();
7018 if (property == NULL) {
7019 // Global variable reference treated as a named property reference.
7020 VariableProxy* proxy = expression_->AsVariableProxy();
7021 ASSERT(proxy->AsVariable() != NULL);
7022 ASSERT(proxy->AsVariable()->is_global());
7023 return proxy->name();
7024 } else {
7025 Literal* raw_name = property->key()->AsLiteral();
7026 ASSERT(raw_name != NULL);
7027 return Handle<String>::cast(raw_name->handle());
7028 }
7029 }
7030
7031
7032 void Reference::GetValue() {
7033 ASSERT(!cgen_->in_spilled_code());
7034 ASSERT(cgen_->HasValidEntryRegisters());
7035 ASSERT(!is_illegal());
7036 MacroAssembler* masm = cgen_->masm();
7037
7038 // Record the source position for the property load.
7039 Property* property = expression_->AsProperty();
7040 if (property != NULL) {
7041 cgen_->CodeForSourcePosition(property->position());
7042 }
7043
7044 switch (type_) {
7045 case SLOT: {
7046 Comment cmnt(masm, "[ Load from Slot");
7047 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
7048 ASSERT(slot != NULL);
7049 Result result =
7050 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
7051 if (!persist_after_get_) set_unloaded();
7052 cgen_->frame()->Push(&result);
7053 break;
7054 }
7055
7056 case NAMED: {
7057 Variable* var = expression_->AsVariableProxy()->AsVariable();
7058 bool is_global = var != NULL;
7059 ASSERT(!is_global || var->is_global());
7060 if (persist_after_get_) cgen_->frame()->Dup();
7061 Result result = cgen_->EmitNamedLoad(GetName(), is_global);
7062 if (!persist_after_get_) set_unloaded();
7063 cgen_->frame()->Push(&result);
7064 break;
7065 }
7066
7067 case KEYED: {
7068 if (persist_after_get_) {
7069 cgen_->frame()->PushElementAt(1);
7070 cgen_->frame()->PushElementAt(1);
7071 }
7072 Result value = cgen_->EmitKeyedLoad();
7073 cgen_->frame()->Push(&value);
7074 if (!persist_after_get_) set_unloaded();
7075 break;
7076 }
7077
7078 default:
7079 UNREACHABLE();
7080 }
7081 }
7082
7083
7084 void Reference::TakeValue() {
7085 // For non-constant frame-allocated slots, we invalidate the value in the
7086 // slot. For all others, we fall back on GetValue.
7087 ASSERT(!cgen_->in_spilled_code());
7088 ASSERT(!is_illegal());
7089 if (type_ != SLOT) {
7090 GetValue();
7091 return;
7092 }
7093
7094 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
7095 ASSERT(slot != NULL);
7096 if (slot->type() == Slot::LOOKUP ||
7097 slot->type() == Slot::CONTEXT ||
7098 slot->var()->mode() == Variable::CONST ||
7099 slot->is_arguments()) {
7100 GetValue();
7101 return;
7102 }
7103
7104 // Only non-constant, frame-allocated parameters and locals can
7105 // reach here. Be careful not to use the optimizations for arguments
7106 // object access since it may not have been initialized yet.
7107 ASSERT(!slot->is_arguments());
7108 if (slot->type() == Slot::PARAMETER) {
7109 cgen_->frame()->TakeParameterAt(slot->index());
7110 } else {
7111 ASSERT(slot->type() == Slot::LOCAL);
7112 cgen_->frame()->TakeLocalAt(slot->index());
7113 }
7114
7115 ASSERT(persist_after_get_);
7116 // Do not unload the reference, because it is used in SetValue.
7117 }
7118
7119
7120 void Reference::SetValue(InitState init_state) {
7121 ASSERT(cgen_->HasValidEntryRegisters());
7122 ASSERT(!is_illegal());
7123 MacroAssembler* masm = cgen_->masm();
7124 switch (type_) {
7125 case SLOT: {
7126 Comment cmnt(masm, "[ Store to Slot");
7127 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
7128 ASSERT(slot != NULL);
7129 cgen_->StoreToSlot(slot, init_state);
7130 set_unloaded();
7131 break;
7132 }
7133
7134 case NAMED: {
7135 Comment cmnt(masm, "[ Store to named Property");
7136 Result answer = cgen_->EmitNamedStore(GetName(), false);
7137 cgen_->frame()->Push(&answer);
7138 set_unloaded();
7139 break;
7140 }
7141
7142 case KEYED: {
7143 Comment cmnt(masm, "[ Store to keyed Property");
7144 Property* property = expression()->AsProperty();
7145 ASSERT(property != NULL);
7146 Result answer = cgen_->EmitKeyedStore(property->key()->type());
7147 cgen_->frame()->Push(&answer);
7148 set_unloaded();
7149 break;
7150 }
7151
7152 case UNLOADED:
7153 case ILLEGAL:
7154 UNREACHABLE();
7155 }
7156 }
7157
7158
7159 void FastNewClosureStub::Generate(MacroAssembler* masm) {
7160 // Clone the boilerplate in new space. Set the context to the
7161 // current context in esi.
7162 Label gc;
7163 __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
7164
7165 // Get the boilerplate function from the stack.
7166 __ mov(edx, Operand(esp, 1 * kPointerSize));
7167
7168 // Compute the function map in the current global context and set that
7169 // as the map of the allocated object.
7170 __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
7171 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
7172 __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
7173 __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
7174
7175 // Clone the rest of the boilerplate fields. We don't have to update
7176 // the write barrier because the allocated object is in new space.
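// (New-space objects are scanned in full by the scavenger, so stores
// into them never need remembered-set entries.)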
7177 for (int offset = kPointerSize;
7178 offset < JSFunction::kSize;
7179 offset += kPointerSize) {
7180 if (offset == JSFunction::kContextOffset) {
7181 __ mov(FieldOperand(eax, offset), esi);
7182 } else {
7183 __ mov(ebx, FieldOperand(edx, offset));
7184 __ mov(FieldOperand(eax, offset), ebx);
7185 }
7186 }
7187
7188 // Return and remove the on-stack parameter.
7189 __ ret(1 * kPointerSize);
7190
7191 // Create a new closure through the slower runtime call.
7192 __ bind(&gc);
7193 __ pop(ecx); // Temporarily remove return address.
7194 __ pop(edx);
7195 __ push(esi);
7196 __ push(edx);
7197 __ push(ecx); // Restore return address.
7198 __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
7199 }
7200
7201
7202 void FastNewContextStub::Generate(MacroAssembler* masm) {
7203 // Try to allocate the context in new space.
7204 Label gc;
7205 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
7206 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
7207 eax, ebx, ecx, &gc, TAG_OBJECT);
7208
7209 // Get the function from the stack.
7210 __ mov(ecx, Operand(esp, 1 * kPointerSize));
7211
7212 // Setup the object header.
7213 __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
7214 __ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
7215
7216 // Setup the fixed slots.
7217 __ xor_(ebx, Operand(ebx)); // Set to NULL.
7218 __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
7219 __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
7220 __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
7221 __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
7222
7223 // Copy the global object from the surrounding context. We go through the
7224 // context in the function (ecx) to match the allocation behavior we have
7225 // in the runtime system (see Heap::AllocateFunctionContext).
7226 __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
7227 __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
7228 __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
7229
7230 // Initialize the rest of the slots to undefined.
7231 __ mov(ebx, Factory::undefined_value());
7232 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
7233 __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
7234 }
7235
7236 // Return and remove the on-stack parameter.
7237 __ mov(esi, Operand(eax));
7238 __ ret(1 * kPointerSize);
7239
7240 // Need to collect. Call into runtime system.
7241 __ bind(&gc);
7242 __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
7243 }
7244
7245
7246 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
7247 // Stack layout on entry:
7248 //
7249 // [esp + kPointerSize]: constant elements.
7250 // [esp + (2 * kPointerSize)]: literal index.
7251 // [esp + (3 * kPointerSize)]: literals array.
7252
7253 // All sizes here are multiples of kPointerSize.
7254 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
7255 int size = JSArray::kSize + elements_size;
7256
7257 // Load boilerplate object into ecx and check if we need to create a
7258 // boilerplate.
7259 Label slow_case;
7260 __ mov(ecx, Operand(esp, 3 * kPointerSize));
7261 __ mov(eax, Operand(esp, 2 * kPointerSize));
7262 ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
7263 __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
7264 __ cmp(ecx, Factory::undefined_value());
7265 __ j(equal, &slow_case);
7266
7267 // Allocate both the JS array and the elements array in one big
7268 // allocation. This avoids multiple limit checks.
7269 __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
7270
7271 // Copy the JS array part.
7272 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
7273 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
7274 __ mov(ebx, FieldOperand(ecx, i));
7275 __ mov(FieldOperand(eax, i), ebx);
7276 }
7277 }
7278
7279 if (length_ > 0) {
7280 // Get hold of the elements array of the boilerplate and setup the
7281 // elements pointer in the resulting object.
7282 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
7283 __ lea(edx, Operand(eax, JSArray::kSize));
7284 __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
7285
7286 // Copy the elements array.
7287 for (int i = 0; i < elements_size; i += kPointerSize) {
7288 __ mov(ebx, FieldOperand(ecx, i));
7289 __ mov(FieldOperand(edx, i), ebx);
7290 }
7291 }
7292
7293 // Return and remove the on-stack parameters.
7294 __ ret(3 * kPointerSize);
7295
7296 __ bind(&slow_case);
7297 ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
7298 __ TailCallRuntime(runtime, 3, 1);
7299 }
7300
7301
7302 // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
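// For the remaining cases it follows ECMA-262 section 9.2: null and
// undetectable objects convert to false, JS objects to true, strings
// to true iff non-empty, and heap numbers to false iff +0, -0 or NaN.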
7303 void ToBooleanStub::Generate(MacroAssembler* masm) {
7304 Label false_result, true_result, not_string;
7305 __ mov(eax, Operand(esp, 1 * kPointerSize));
7306
7307 // 'null' => false.
7308 __ cmp(eax, Factory::null_value());
7309 __ j(equal, &false_result);
7310
7311 // Get the map and type of the heap object.
7312 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
7313 __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
7314
7315 // Undetectable => false.
7316 __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
7317 __ and_(ebx, 1 << Map::kIsUndetectable);
7318 __ j(not_zero, &false_result);
7319
7320 // JavaScript object => true.
7321 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
7322 __ j(above_equal, &true_result);
7323
7324 // String value => false iff empty.
7325 __ cmp(ecx, FIRST_NONSTRING_TYPE);
7326 __ j(above_equal, &not_string);
7327 __ mov(edx, FieldOperand(eax, String::kLengthOffset));
7328 __ test(edx, Operand(edx));
7329 __ j(zero, &false_result);
7330 __ jmp(&true_result);
7331
7332 __ bind(&not_string);
7333 // HeapNumber => false iff +0, -0, or NaN.
7334 __ cmp(edx, Factory::heap_number_map());
7335 __ j(not_equal, &true_result);
7336 __ fldz();
7337 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
7338 __ FCmp();
7339 __ j(zero, &false_result);
7340 // Fall through to |true_result|.
7341
7342 // Return 1/0 for true/false in eax.
7343 __ bind(&true_result);
7344 __ mov(eax, 1);
7345 __ ret(1 * kPointerSize);
7346 __ bind(&false_result);
7347 __ mov(eax, 0);
7348 __ ret(1 * kPointerSize);
7349 }
7350
7351
7352 void GenericBinaryOpStub::GenerateCall(
7353 MacroAssembler* masm,
7354 Register left,
7355 Register right) {
7356 if (!ArgsInRegistersSupported()) {
7357 // Pass arguments on the stack.
7358 __ push(left);
7359 __ push(right);
7360 } else {
7361 // The calling convention with registers is left in edx and right in eax.
7362 Register left_arg = edx;
7363 Register right_arg = eax;
7364 if (!(left.is(left_arg) && right.is(right_arg))) {
7365 if (left.is(right_arg) && right.is(left_arg)) {
7366 if (IsOperationCommutative()) {
7367 SetArgsReversed();
7368 } else {
7369 __ xchg(left, right);
7370 }
7371 } else if (left.is(left_arg)) {
7372 __ mov(right_arg, right);
7373 } else if (right.is(right_arg)) {
7374 __ mov(left_arg, left);
7375 } else if (left.is(right_arg)) {
7376 if (IsOperationCommutative()) {
7377 __ mov(left_arg, right);
7378 SetArgsReversed();
7379 } else {
7380 // Order of moves important to avoid destroying left argument.
7381 __ mov(left_arg, left);
7382 __ mov(right_arg, right);
7383 }
7384 } else if (right.is(left_arg)) {
7385 if (IsOperationCommutative()) {
7386 __ mov(right_arg, left);
7387 SetArgsReversed();
7388 } else {
7389 // Order of moves important to avoid destroying right argument.
7390 __ mov(right_arg, right);
7391 __ mov(left_arg, left);
7392 }
7393 } else {
7394 // Order of moves is not important.
7395 __ mov(left_arg, left);
7396 __ mov(right_arg, right);
7397 }
7398 }
7399
7400 // Update flags to indicate that arguments are in registers.
7401 SetArgsInRegisters();
7402 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7403 }
7404
7405 // Call the stub.
7406 __ CallStub(this);
7407 }
7408
7409
7410 void GenericBinaryOpStub::GenerateCall(
7411 MacroAssembler* masm,
7412 Register left,
7413 Smi* right) {
7414 if (!ArgsInRegistersSupported()) {
7415 // Pass arguments on the stack.
7416 __ push(left);
7417 __ push(Immediate(right));
7418 } else {
7419 // The calling convention with registers is left in edx and right in eax.
7420 Register left_arg = edx;
7421 Register right_arg = eax;
7422 if (left.is(left_arg)) {
7423 __ mov(right_arg, Immediate(right));
7424 } else if (left.is(right_arg) && IsOperationCommutative()) {
7425 __ mov(left_arg, Immediate(right));
7426 SetArgsReversed();
7427 } else {
7428 // For non-commutative operations, left and right_arg might be
7429 // the same register. Therefore, the order of the moves is
7430 // important here in order to not overwrite left before moving
7431 // it to left_arg.
7432 __ mov(left_arg, left);
7433 __ mov(right_arg, Immediate(right));
7434 }
7435
7436 // Update flags to indicate that arguments are in registers.
7437 SetArgsInRegisters();
7438 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7439 }
7440
7441 // Call the stub.
7442 __ CallStub(this);
7443 }
7444
7445
7446 void GenericBinaryOpStub::GenerateCall(
7447 MacroAssembler* masm,
7448 Smi* left,
7449 Register right) {
7450 if (!ArgsInRegistersSupported()) {
7451 // Pass arguments on the stack.
7452 __ push(Immediate(left));
7453 __ push(right);
7454 } else {
7455 // The calling convention with registers is left in edx and right in eax.
7456 Register left_arg = edx;
7457 Register right_arg = eax;
7458 if (right.is(right_arg)) {
7459 __ mov(left_arg, Immediate(left));
7460 } else if (right.is(left_arg) && IsOperationCommutative()) {
7461 __ mov(right_arg, Immediate(left));
7462 SetArgsReversed();
7463 } else {
7464 // For non-commutative operations, right and left_arg might be
7465 // the same register. Therefore, the order of the moves is
7466 // important here in order to not overwrite right before moving
7467 // it to right_arg.
7468 __ mov(right_arg, right);
7469 __ mov(left_arg, Immediate(left));
7470 }
7471 // Update flags to indicate that arguments are in registers.
7472 SetArgsInRegisters();
7473 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7474 }
7475
7476 // Call the stub.
7477 __ CallStub(this);
7478 }
7479
7480
7481 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
7482 VirtualFrame* frame,
7483 Result* left,
7484 Result* right) {
7485 if (ArgsInRegistersSupported()) {
7486 SetArgsInRegisters();
7487 return frame->CallStub(this, left, right);
7488 } else {
7489 frame->Push(left);
7490 frame->Push(right);
7491 return frame->CallStub(this, 2);
7492 }
7493 }
7494
7495
7496 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7497 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
7498 // dividend in eax and edx free for the division. Use eax, ebx for those.
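// (On ia32, idiv takes its dividend in edx:eax and cdq sign-extends
// eax into edx, so edx cannot hold an operand for DIV or MOD.)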
7499 Comment load_comment(masm, "-- Load arguments");
7500 Register left = edx;
7501 Register right = eax;
7502 if (op_ == Token::DIV || op_ == Token::MOD) {
7503 left = eax;
7504 right = ebx;
7505 if (HasArgsInRegisters()) {
7506 __ mov(ebx, eax);
7507 __ mov(eax, edx);
7508 }
7509 }
7510 if (!HasArgsInRegisters()) {
7511 __ mov(right, Operand(esp, 1 * kPointerSize));
7512 __ mov(left, Operand(esp, 2 * kPointerSize));
7513 }
7514
7515 // 2. Prepare the smi check of both operands by oring them together.
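// With kSmiTag == 0 and a one-bit tag, (left | right) has its low bit
// set iff at least one operand is a heap object, so one test suffices.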
7516 Comment smi_check_comment(masm, "-- Smi check arguments");
7517 Label not_smis;
7518 Register combined = ecx;
7519 ASSERT(!left.is(combined) && !right.is(combined));
7520 switch (op_) {
7521 case Token::BIT_OR:
7522 // Perform the operation into eax and smi check the result. Preserve
7523 // eax in case the result is not a smi.
7524 ASSERT(!left.is(ecx) && !right.is(ecx));
7525 __ mov(ecx, right);
7526 __ or_(right, Operand(left)); // Bitwise or is commutative.
7527 combined = right;
7528 break;
7529
7530 case Token::BIT_XOR:
7531 case Token::BIT_AND:
7532 case Token::ADD:
7533 case Token::SUB:
7534 case Token::MUL:
7535 case Token::DIV:
7536 case Token::MOD:
7537 __ mov(combined, right);
7538 __ or_(combined, Operand(left));
7539 break;
7540
7541 case Token::SHL:
7542 case Token::SAR:
7543 case Token::SHR:
7544 // Move the right operand into ecx for the shift operation, use eax
7545 // for the smi check register.
7546 ASSERT(!left.is(ecx) && !right.is(ecx));
7547 __ mov(ecx, right);
7548 __ or_(right, Operand(left));
7549 combined = right;
7550 break;
7551
7552 default:
7553 break;
7554 }
7555
7556 // 3. Perform the smi check of the operands.
7557 ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
7558 __ test(combined, Immediate(kSmiTagMask));
7559 __ j(not_zero, &not_smis, not_taken);
7560
7561 // 4. Operands are both smis, perform the operation leaving the result in
7562 // eax and check the result if necessary.
7563 Comment perform_smi(masm, "-- Perform smi operation");
7564 Label use_fp_on_smis;
7565 switch (op_) {
7566 case Token::BIT_OR:
7567 // Nothing to do.
7568 break;
7569
7570 case Token::BIT_XOR:
7571 ASSERT(right.is(eax));
7572 __ xor_(right, Operand(left)); // Bitwise xor is commutative.
7573 break;
7574
7575 case Token::BIT_AND:
7576 ASSERT(right.is(eax));
7577 __ and_(right, Operand(left)); // Bitwise and is commutative.
7578 break;
7579
7580 case Token::SHL:
7581 // Remove tags from operands (but keep sign).
7582 __ SmiUntag(left);
7583 __ SmiUntag(ecx);
7584 // Perform the operation.
7585 __ shl_cl(left);
7586 // Check that the *signed* result fits in a smi.
7587 __ cmp(left, 0xc0000000);
7588 __ j(sign, &use_fp_on_smis, not_taken);
7589 // Tag the result and store it in register eax.
7590 __ SmiTag(left);
7591 __ mov(eax, left);
7592 break;
7593
7594 case Token::SAR:
7595 // Remove tags from operands (but keep sign).
7596 __ SmiUntag(left);
7597 __ SmiUntag(ecx);
7598 // Perform the operation.
7599 __ sar_cl(left);
7600 // Tag the result and store it in register eax.
7601 __ SmiTag(left);
7602 __ mov(eax, left);
7603 break;
7604
7605 case Token::SHR:
7606 // Remove tags from operands (but keep sign).
7607 __ SmiUntag(left);
7608 __ SmiUntag(ecx);
7609 // Perform the operation.
7610 __ shr_cl(left);
7611 // Check that the *unsigned* result fits in a smi.
7612 // Neither of the two high-order bits can be set:
7613 // - 0x80000000: high bit would be lost when smi tagging.
7614 // - 0x40000000: this number would convert to negative when
7615 // smi tagging. These two cases can only happen with shifts
7616 // by 0 or 1 when handed a valid smi.
7617 __ test(left, Immediate(0xc0000000));
7618 __ j(not_zero, slow, not_taken);
7619 // Tag the result and store it in register eax.
7620 __ SmiTag(left);
7621 __ mov(eax, left);
7622 break;
7623
7624 case Token::ADD:
7625 ASSERT(right.is(eax));
7626 __ add(right, Operand(left)); // Addition is commutative.
7627 __ j(overflow, &use_fp_on_smis, not_taken);
7628 break;
7629
7630 case Token::SUB:
7631 __ sub(left, Operand(right));
7632 __ j(overflow, &use_fp_on_smis, not_taken);
7633 __ mov(eax, left);
7634 break;
7635
7636 case Token::MUL:
7637 // If the smi tag is 0 we can just leave the tag on one operand.
7638 ASSERT(kSmiTag == 0); // Adjust code below if not the case.
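// Since a smi is the value shifted left by one, (2a) * b == 2 * (a*b),
// so untagging just one operand leaves the product correctly tagged.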
7639 // We can't revert the multiplication if the result is not a smi
7640 // so save the right operand.
7641 __ mov(ebx, right);
7642 // Remove tag from one of the operands (but keep sign).
7643 __ SmiUntag(right);
7644 // Do multiplication.
7645 __ imul(right, Operand(left)); // Multiplication is commutative.
7646 __ j(overflow, &use_fp_on_smis, not_taken);
7647 // Check for negative zero result. Use combined = left | right.
7648 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
7649 break;
7650
7651 case Token::DIV:
7652 // We can't revert the division if the result is not a smi so
7653 // save the left operand.
7654 __ mov(edi, left);
7655 // Check for 0 divisor.
7656 __ test(right, Operand(right));
7657 __ j(zero, &use_fp_on_smis, not_taken);
7658 // Sign extend left into edx:eax.
7659 ASSERT(left.is(eax));
7660 __ cdq();
7661 // Divide edx:eax by right.
7662 __ idiv(right);
7663 // Check for the corner case of dividing the most negative smi by
7664 // -1. We cannot use the overflow flag, since it is not set by idiv
7665 // instruction.
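// For example, -1073741824 / -1 = 1073741824 (0x40000000), which is
// one more than the largest smi, 2^30 - 1.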
7666 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
7667 __ cmp(eax, 0x40000000);
7668 __ j(equal, &use_fp_on_smis);
7669 // Check for negative zero result. Use combined = left | right.
7670 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
7671 // Check that the remainder is zero.
7672 __ test(edx, Operand(edx));
7673 __ j(not_zero, &use_fp_on_smis);
7674 // Tag the result and store it in register eax.
7675 __ SmiTag(eax);
7676 break;
7677
7678 case Token::MOD:
7679 // Check for 0 divisor.
7680 __ test(right, Operand(right));
7681 __ j(zero, &not_smis, not_taken);
7682
7683 // Sign extend left into edx:eax.
7684 ASSERT(left.is(eax));
7685 __ cdq();
7686 // Divide edx:eax by right.
7687 __ idiv(right);
7688 // Check for negative zero result. Use combined = left | right.
7689 __ NegativeZeroTest(edx, combined, slow);
7690 // Move remainder to register eax.
7691 __ mov(eax, edx);
7692 break;
7693
7694 default:
7695 UNREACHABLE();
7696 }
7697
7698 // 5. Emit return of result in eax.
7699 GenerateReturn(masm);
7700
7701 // 6. For some operations emit inline code to perform floating point
7702 // operations on known smis (e.g., if the result of the operation
7703 // overflowed the smi range).
7704 switch (op_) {
7705 case Token::SHL: {
7706 Comment perform_float(masm, "-- Perform float operation on smis");
7707 __ bind(&use_fp_on_smis);
7708 // Result we want is in left == edx, so we can put the allocated heap
7709 // number in eax.
7710 __ AllocateHeapNumber(eax, ecx, ebx, slow);
7711 // Store the result in the HeapNumber and return.
7712 if (CpuFeatures::IsSupported(SSE2)) {
7713 CpuFeatures::Scope use_sse2(SSE2);
7714 __ cvtsi2sd(xmm0, Operand(left));
7715 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
7716 } else {
7717 // It's OK to overwrite the right argument on the stack because we
7718 // are about to return.
7719 __ mov(Operand(esp, 1 * kPointerSize), left);
7720 __ fild_s(Operand(esp, 1 * kPointerSize));
7721 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
7722 }
7723 GenerateReturn(masm);
7724 break;
7725 }
7726
7727 case Token::ADD:
7728 case Token::SUB:
7729 case Token::MUL:
7730 case Token::DIV: {
7731 Comment perform_float(masm, "-- Perform float operation on smis");
7732 __ bind(&use_fp_on_smis);
7733 // Restore arguments to edx, eax.
7734 switch (op_) {
7735 case Token::ADD:
7736 // Revert right = right + left.
7737 __ sub(right, Operand(left));
7738 break;
7739 case Token::SUB:
7740 // Revert left = left - right.
7741 __ add(left, Operand(right));
7742 break;
7743 case Token::MUL:
7744 // Right was clobbered but a copy is in ebx.
7745 __ mov(right, ebx);
7746 break;
7747 case Token::DIV:
7748 // Left was clobbered but a copy is in edi. Right is in ebx for
7749 // division.
7750 __ mov(edx, edi);
7751 __ mov(eax, right);
7752 break;
7753 default: UNREACHABLE();
7754 break;
7755 }
7756 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
7757 if (CpuFeatures::IsSupported(SSE2)) {
7758 CpuFeatures::Scope use_sse2(SSE2);
7759 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
7760 switch (op_) {
7761 case Token::ADD: __ addsd(xmm0, xmm1); break;
7762 case Token::SUB: __ subsd(xmm0, xmm1); break;
7763 case Token::MUL: __ mulsd(xmm0, xmm1); break;
7764 case Token::DIV: __ divsd(xmm0, xmm1); break;
7765 default: UNREACHABLE();
7766 }
7767 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
7768 } else { // SSE2 not available, use FPU.
7769 FloatingPointHelper::LoadFloatSmis(masm, ebx);
7770 switch (op_) {
7771 case Token::ADD: __ faddp(1); break;
7772 case Token::SUB: __ fsubp(1); break;
7773 case Token::MUL: __ fmulp(1); break;
7774 case Token::DIV: __ fdivp(1); break;
7775 default: UNREACHABLE();
7776 }
7777 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
7778 }
7779 __ mov(eax, ecx);
7780 GenerateReturn(masm);
7781 break;
7782 }
7783
7784 default:
7785 break;
7786 }
7787
7788 // 7. Non-smi operands, fall out to the non-smi code with the operands in
7789 // edx and eax.
7790 Comment done_comment(masm, "-- Enter non-smi code");
7791 __ bind(&not_smis);
7792 switch (op_) {
7793 case Token::BIT_OR:
7794 case Token::SHL:
7795 case Token::SAR:
7796 case Token::SHR:
7797 // Right operand is saved in ecx and eax was destroyed by the smi
7798 // check.
7799 __ mov(eax, ecx);
7800 break;
7801
7802 case Token::DIV:
7803 case Token::MOD:
7804 // Operands are in eax, ebx at this point.
7805 __ mov(edx, eax);
7806 __ mov(eax, ebx);
7807 break;
7808
7809 default:
7810 break;
7811 }
7812 }
7813
7814
7815 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7816 Label call_runtime;
7817
7818 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
7819
7820 // Generate fast case smi code if requested. This flag is set when the fast
7821 // case smi code is not generated by the caller. Generating it here will speed
7822 // up common operations.
7823 if (HasSmiCodeInStub()) {
7824 GenerateSmiCode(masm, &call_runtime);
7825 } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
7826 GenerateLoadArguments(masm);
7827 }
7828
7829 // Floating point case.
7830 switch (op_) {
7831 case Token::ADD:
7832 case Token::SUB:
7833 case Token::MUL:
7834 case Token::DIV: {
7835 if (CpuFeatures::IsSupported(SSE2)) {
7836 CpuFeatures::Scope use_sse2(SSE2);
7837 if (NumberInfo::IsNumber(operands_type_)) {
7838 if (FLAG_debug_code) {
7839 // Assert at runtime that inputs are only numbers.
7840 __ AbortIfNotNumber(edx,
7841 "GenericBinaryOpStub operand not a number.");
7842 __ AbortIfNotNumber(eax,
7843 "GenericBinaryOpStub operand not a number.");
7844 }
7845 FloatingPointHelper::LoadSSE2Operands(masm);
7846 } else {
7847 FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
7848 }
7849
7850 switch (op_) {
7851 case Token::ADD: __ addsd(xmm0, xmm1); break;
7852 case Token::SUB: __ subsd(xmm0, xmm1); break;
7853 case Token::MUL: __ mulsd(xmm0, xmm1); break;
7854 case Token::DIV: __ divsd(xmm0, xmm1); break;
7855 default: UNREACHABLE();
7856 }
7857 GenerateHeapResultAllocation(masm, &call_runtime);
7858 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
7859 GenerateReturn(masm);
7860 } else { // SSE2 not available, use FPU.
7861 if (NumberInfo::IsNumber(operands_type_)) {
7862 if (FLAG_debug_code) {
7863 // Assert at runtime that inputs are only numbers.
7864 __ AbortIfNotNumber(edx,
7865 "GenericBinaryOpStub operand not a number.");
7866 __ AbortIfNotNumber(eax,
7867 "GenericBinaryOpStub operand not a number.");
7868 }
7869 } else {
7870 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
7871 }
7872 FloatingPointHelper::LoadFloatOperands(
7873 masm,
7874 ecx,
7875 FloatingPointHelper::ARGS_IN_REGISTERS);
7876 switch (op_) {
7877 case Token::ADD: __ faddp(1); break;
7878 case Token::SUB: __ fsubp(1); break;
7879 case Token::MUL: __ fmulp(1); break;
7880 case Token::DIV: __ fdivp(1); break;
7881 default: UNREACHABLE();
7882 }
7883 Label after_alloc_failure;
7884 GenerateHeapResultAllocation(masm, &after_alloc_failure);
7885 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
7886 GenerateReturn(masm);
7887 __ bind(&after_alloc_failure);
7888 __ ffree();
7889 __ jmp(&call_runtime);
7890 }
7891 }
7892 case Token::MOD: {
7893 // For MOD we go directly to runtime in the non-smi case.
7894 break;
7895 }
7896 case Token::BIT_OR:
7897 case Token::BIT_AND:
7898 case Token::BIT_XOR:
7899 case Token::SAR:
7900 case Token::SHL:
7901 case Token::SHR: {
7902 Label non_smi_result;
7903 FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
7904 switch (op_) {
7905 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
7906 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
7907 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
7908 case Token::SAR: __ sar_cl(eax); break;
7909 case Token::SHL: __ shl_cl(eax); break;
7910 case Token::SHR: __ shr_cl(eax); break;
7911 default: UNREACHABLE();
7912 }
7913 if (op_ == Token::SHR) {
7914 // Check if result is non-negative and fits in a smi.
7915 __ test(eax, Immediate(0xc0000000));
7916 __ j(not_zero, &call_runtime);
7917 } else {
7918 // Check if result fits in a smi.
7919 __ cmp(eax, 0xc0000000);
7920 __ j(negative, &non_smi_result);
7921 }
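      // Both checks rely on the smi value range being [-2^30, 2^30): for SHR
      // the unsigned result must have its top two bits clear, while for the
      // signed ops eax - 0xc0000000 has its sign bit set exactly when eax
      // lies outside that range.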
7922 // Tag smi result and return.
7923 __ SmiTag(eax);
7924 GenerateReturn(masm);
7925
7926 // All ops except SHR return a signed int32 that we load in a HeapNumber.
7927 if (op_ != Token::SHR) {
7928 __ bind(&non_smi_result);
7929 // Allocate a heap number if needed.
7930 __ mov(ebx, Operand(eax)); // ebx: result
7931 Label skip_allocation;
7932 switch (mode_) {
7933 case OVERWRITE_LEFT:
7934 case OVERWRITE_RIGHT:
7935 // If the operand was an object, we skip the
7936 // allocation of a heap number.
7937 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
7938 1 * kPointerSize : 2 * kPointerSize));
7939 __ test(eax, Immediate(kSmiTagMask));
7940 __ j(not_zero, &skip_allocation, not_taken);
7941 // Fall through!
7942 case NO_OVERWRITE:
7943 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
7944 __ bind(&skip_allocation);
7945 break;
7946 default: UNREACHABLE();
7947 }
7948 // Store the result in the HeapNumber and return.
7949 if (CpuFeatures::IsSupported(SSE2)) {
7950 CpuFeatures::Scope use_sse2(SSE2);
7951 __ cvtsi2sd(xmm0, Operand(ebx));
7952 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
7953 } else {
7954 __ mov(Operand(esp, 1 * kPointerSize), ebx);
7955 __ fild_s(Operand(esp, 1 * kPointerSize));
7956 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
7957 }
7958 GenerateReturn(masm);
7959 }
7960 break;
7961 }
7962 default: UNREACHABLE(); break;
7963 }
7964
7965 // If all else fails, use the runtime system to get the correct
7966 // result. If arguments were passed in registers, place them on the
7967 // stack in the correct order below the return address.
7968 __ bind(&call_runtime);
7969 if (HasArgsInRegisters()) {
7970 __ pop(ecx);
7971 if (HasArgsReversed()) {
7972 __ push(eax);
7973 __ push(edx);
7974 } else {
7975 __ push(edx);
7976 __ push(eax);
7977 }
7978 __ push(ecx);
7979 }
7980 switch (op_) {
7981 case Token::ADD: {
7982 // Test for string arguments before calling runtime.
7983 Label not_strings, not_string1, string1, string1_smi2;
7984 Result answer;
7985 __ test(edx, Immediate(kSmiTagMask));
7986 __ j(zero, &not_string1);
7987 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
7988 __ j(above_equal, &not_string1);
7989
7990 // First argument is a string, test second.
7991 __ test(eax, Immediate(kSmiTagMask));
7992 __ j(zero, &string1_smi2);
7993 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
7994 __ j(above_equal, &string1);
7995
7996 // First and second argument are strings. Jump to the string add stub.
7997 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
7998 __ TailCallStub(&string_add_stub);
7999
8000 __ bind(&string1_smi2);
8001 // First argument is a string, second is a smi. Try to lookup the number
8002 // string for the smi in the number string cache.
8003 NumberToStringStub::GenerateLookupNumberStringCache(
8004 masm, eax, edi, ebx, ecx, true, &string1);
8005
8006 // Call the string add stub to make the result.
8007 __ EnterInternalFrame();
8008 __ push(edx); // Original first argument.
8009 __ push(edi); // Number to string result for second argument.
8010 __ CallStub(&string_add_stub);
8011 __ LeaveInternalFrame();
8012 __ ret(2 * kPointerSize);
8013
8014 __ bind(&string1);
8015 __ InvokeBuiltin(
8016 HasArgsReversed() ?
8017 Builtins::STRING_ADD_RIGHT :
8018 Builtins::STRING_ADD_LEFT,
8019 JUMP_FUNCTION);
8020
8021 // First argument was not a string, test second.
8022 __ bind(&not_string1);
8023 __ test(eax, Immediate(kSmiTagMask));
8024 __ j(zero, &not_strings);
8025 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
8026 __ j(above_equal, &not_strings);
8027
8028 // Only second argument is a string.
8029 __ InvokeBuiltin(
8030 HasArgsReversed() ?
8031 Builtins::STRING_ADD_LEFT :
8032 Builtins::STRING_ADD_RIGHT,
8033 JUMP_FUNCTION);
8034
8035 __ bind(&not_strings);
8036 // Neither argument is a string.
8037 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
8038 break;
8039 }
8040 case Token::SUB:
8041 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
8042 break;
8043 case Token::MUL:
8044 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
8045 break;
8046 case Token::DIV:
8047 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
8048 break;
8049 case Token::MOD:
8050 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
8051 break;
8052 case Token::BIT_OR:
8053 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
8054 break;
8055 case Token::BIT_AND:
8056 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
8057 break;
8058 case Token::BIT_XOR:
8059 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
8060 break;
8061 case Token::SAR:
8062 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
8063 break;
8064 case Token::SHL:
8065 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
8066 break;
8067 case Token::SHR:
8068 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
8069 break;
8070 default:
8071 UNREACHABLE();
8072 }
8073 }
8074
8075
8076 void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
8077 Label* alloc_failure) {
8078 Label skip_allocation;
8079 OverwriteMode mode = mode_;
8080 if (HasArgsReversed()) {
8081 if (mode == OVERWRITE_RIGHT) {
8082 mode = OVERWRITE_LEFT;
8083 } else if (mode == OVERWRITE_LEFT) {
8084 mode = OVERWRITE_RIGHT;
8085 }
8086 }
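  // With the arguments reversed, the register that normally holds the left
  // operand holds the right one (and vice versa), so the overwrite mode is
  // mirrored above to keep referring to the same value.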
8087 switch (mode) {
8088 case OVERWRITE_LEFT: {
8089 // If the argument in edx is already an object, we skip the
8090 // allocation of a heap number.
8091 __ test(edx, Immediate(kSmiTagMask));
8092 __ j(not_zero, &skip_allocation, not_taken);
8093 // Allocate a heap number for the result. Keep eax and edx intact
8094 // for the possible runtime call.
8095 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
8096 // Now edx can be overwritten, losing one of the arguments, as we are
8097 // done and will not need it any more.
8098 __ mov(edx, Operand(ebx));
8099 __ bind(&skip_allocation);
8100 // Use object in edx as a result holder
8101 __ mov(eax, Operand(edx));
8102 break;
8103 }
8104 case OVERWRITE_RIGHT:
8105 // If the argument in eax is already an object, we skip the
8106 // allocation of a heap number.
8107 __ test(eax, Immediate(kSmiTagMask));
8108 __ j(not_zero, &skip_allocation, not_taken);
8109 // Fall through!
8110 case NO_OVERWRITE:
8111 // Allocate a heap number for the result. Keep eax and edx intact
8112 // for the possible runtime call.
8113 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
8114 // Now eax can be overwritten, losing one of the arguments, as we are
8115 // done and will not need it any more.
8116 __ mov(eax, ebx);
8117 __ bind(&skip_allocation);
8118 break;
8119 default: UNREACHABLE();
8120 }
8121 }
8122
8123
8124 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
8125 // If arguments are not passed in registers, read them from the stack.
8126 if (!HasArgsInRegisters()) {
8127 __ mov(eax, Operand(esp, 1 * kPointerSize));
8128 __ mov(edx, Operand(esp, 2 * kPointerSize));
8129 }
8130 }
8131
8132
8133 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
8134 // If arguments are not passed in registers, remove them from the stack
8135 // before returning.
8136 if (!HasArgsInRegisters()) {
8137 __ ret(2 * kPointerSize); // Remove both operands
8138 } else {
8139 __ ret(0);
8140 }
8141 }
8142
8143
8144 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
8145 // Input on stack:
8146 // esp[4]: argument (should be number).
8147 // esp[0]: return address.
8148 // Test that eax is a number.
8149 Label runtime_call;
8150 Label runtime_call_clear_stack;
8151 Label input_not_smi;
8152 Label loaded;
8153 __ mov(eax, Operand(esp, kPointerSize));
8154 __ test(eax, Immediate(kSmiTagMask));
8155 __ j(not_zero, &input_not_smi);
8156 // Input is a smi. Untag and load it onto the FPU stack.
8157 // Then load the low and high words of the double into ebx, edx.
8158 ASSERT_EQ(1, kSmiTagSize);
8159 __ sar(eax, 1);
8160 __ sub(Operand(esp), Immediate(2 * kPointerSize));
8161 __ mov(Operand(esp, 0), eax);
8162 __ fild_s(Operand(esp, 0));
8163 __ fst_d(Operand(esp, 0));
8164 __ pop(edx);
8165 __ pop(ebx);
8166 __ jmp(&loaded);
8167 __ bind(&input_not_smi);
8168 // Check if input is a HeapNumber.
8169 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
8170 __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
8171 __ j(not_equal, &runtime_call);
8172 // Input is a HeapNumber. Push it on the FPU stack and load its
8173 // low and high words into ebx, edx.
8174 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
8175 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
8176 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
8177
8178 __ bind(&loaded);
8179 // ST[0] == double value
8180 // ebx = low 32 bits of double value
8181 // edx = high 32 bits of double value
8182 // Compute hash:
8183 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
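  // As an illustration, for the value 1.0 (high word 0x3FF00000, low word 0)
  // the xor/shift steps below produce 0x3FCFCFCF before the final mask is
  // applied to form the cache index.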
8184 __ mov(ecx, ebx);
8185 __ xor_(ecx, Operand(edx));
8186 __ mov(eax, ecx);
8187 __ sar(eax, 16);
8188 __ xor_(ecx, Operand(eax));
8189 __ mov(eax, ecx);
8190 __ sar(eax, 8);
8191 __ xor_(ecx, Operand(eax));
8192 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
8193 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
8194 // ST[0] == double value.
8195 // ebx = low 32 bits of double value.
8196 // edx = high 32 bits of double value.
8197 // ecx = TranscendentalCache::hash(double value).
8198 __ mov(eax,
8199 Immediate(ExternalReference::transcendental_cache_array_address()));
8200 // Eax points to cache array.
8201 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
8202 // Eax points to the cache for the type type_.
8203 // If NULL, the cache hasn't been initialized yet, so go through runtime.
8204 __ test(eax, Operand(eax));
8205 __ j(zero, &runtime_call_clear_stack);
8206 #ifdef DEBUG
8207 // Check that the layout of cache elements matches expectations.
8208 { // NOLINT - doesn't like a single brace on a line.
8209 TranscendentalCache::Element test_elem[2];
8210 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
8211 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
8212 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
8213 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
8214 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
8215 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
8216 CHECK_EQ(0, elem_in0 - elem_start);
8217 CHECK_EQ(kIntSize, elem_in1 - elem_start);
8218 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
8219 }
8220 #endif
8221 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
8222 __ lea(ecx, Operand(ecx, ecx, times_2, 0));
8223 __ lea(ecx, Operand(eax, ecx, times_4, 0));
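  // The two lea instructions compute ecx = ecx * 3 and then
  // ecx = eax + ecx * 4, i.e. eax + index * 12, matching the 12-byte element
  // size checked above.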
8224 // Check if cache matches: Double value is stored in uint32_t[2] array.
8225 Label cache_miss;
8226 __ cmp(ebx, Operand(ecx, 0));
8227 __ j(not_equal, &cache_miss);
8228 __ cmp(edx, Operand(ecx, kIntSize));
8229 __ j(not_equal, &cache_miss);
8230 // Cache hit!
8231 __ mov(eax, Operand(ecx, 2 * kIntSize));
8232 __ fstp(0);
8233 __ ret(kPointerSize);
8234
8235 __ bind(&cache_miss);
8236 // Update cache with new value.
8237 // We are short on registers, so use no_reg as scratch.
8238 // This gives slightly larger code.
8239 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
8240 GenerateOperation(masm);
8241 __ mov(Operand(ecx, 0), ebx);
8242 __ mov(Operand(ecx, kIntSize), edx);
8243 __ mov(Operand(ecx, 2 * kIntSize), eax);
8244 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
8245 __ ret(kPointerSize);
8246
8247 __ bind(&runtime_call_clear_stack);
8248 __ fstp(0);
8249 __ bind(&runtime_call);
8250 __ TailCallRuntime(ExternalReference(RuntimeFunction()), 1, 1);
8251 }
8252
8253
8254 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
8255 switch (type_) {
8256 // Add more cases when necessary.
8257 case TranscendentalCache::SIN: return Runtime::kMath_sin;
8258 case TranscendentalCache::COS: return Runtime::kMath_cos;
8259 default:
8260 UNIMPLEMENTED();
8261 return Runtime::kAbort;
8262 }
8263 }
8264
8265
8266 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
8267 // Only free register is edi.
8268 Label done;
8269 ASSERT(type_ == TranscendentalCache::SIN ||
8270 type_ == TranscendentalCache::COS);
8271 // More transcendental types can be added later.
8272
8273 // Both fsin and fcos require arguments in the range +/-2^63 and
8274 // return NaN for infinities and NaN. They can share all code except
8275 // the actual fsin/fcos operation.
8276 Label in_range;
8277 // If the argument is outside the range -2^63..2^63, fsin/fcos don't
8278 // work. We must reduce it to the appropriate range.
8279 __ mov(edi, edx);
8280 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
8281 int supported_exponent_limit =
8282 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
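  // With the IEEE-754 double layout (exponent bias 1023, exponent field
  // starting at bit 20 of the high word) this limit is 0x43E00000, the high
  // word of 2^63.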
8283 __ cmp(Operand(edi), Immediate(supported_exponent_limit));
8284 __ j(below, &in_range, taken);
8285 // Check for infinity and NaN. Both return NaN for sin.
8286 __ cmp(Operand(edi), Immediate(0x7ff00000));
8287 Label non_nan_result;
8288 __ j(not_equal, &non_nan_result, taken);
8289 // Input is +/-Infinity or NaN. Result is NaN.
8290 __ fstp(0);
8291 // NaN is represented by 0x7ff8000000000000.
8292 __ push(Immediate(0x7ff80000));
8293 __ push(Immediate(0));
8294 __ fld_d(Operand(esp, 0));
8295 __ add(Operand(esp), Immediate(2 * kPointerSize));
8296 __ jmp(&done);
8297
8298 __ bind(&non_nan_result);
8299
8300 // Use fpmod to restrict argument to the range +/-2*PI.
8301 __ mov(edi, eax); // Save eax before using fnstsw_ax.
8302 __ fldpi();
8303 __ fadd(0);
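  // fadd(0) adds st(0) to itself, so the top of the FPU stack now holds 2*pi.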
8304 __ fld(1);
8305 // FPU Stack: input, 2*pi, input.
8306 {
8307 Label no_exceptions;
8308 __ fwait();
8309 __ fnstsw_ax();
8310 // Clear the exceptions if the Invalid Operation or Zero Divide flags are set.
8311 __ test(Operand(eax), Immediate(5));
8312 __ j(zero, &no_exceptions);
8313 __ fnclex();
8314 __ bind(&no_exceptions);
8315 }
8316
8317 // Compute st(0) % st(1)
8318 {
8319 Label partial_remainder_loop;
8320 __ bind(&partial_remainder_loop);
8321 __ fprem1();
8322 __ fwait();
8323 __ fnstsw_ax();
8324 __ test(Operand(eax), Immediate(0x400 /* C2 */));
8325 // If C2 is set, computation only has partial result. Loop to
8326 // continue computation.
8327 __ j(not_zero, &partial_remainder_loop);
8328 }
8329 // FPU Stack: input, 2*pi, input % 2*pi
8330 __ fstp(2);
8331 __ fstp(0);
8332 __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
8333
8334 // FPU Stack: input % 2*pi
8335 __ bind(&in_range);
8336 switch (type_) {
8337 case TranscendentalCache::SIN:
8338 __ fsin();
8339 break;
8340 case TranscendentalCache::COS:
8341 __ fcos();
8342 break;
8343 default:
8344 UNREACHABLE();
8345 }
8346 __ bind(&done);
8347 }
8348
8349
8350 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
8351 // is faster than using the built-in instructions on floating point registers.
8352 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
8353 // trashed registers.
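// A rough host-side sketch of what the generated code computes (assuming
// IEEE-754 doubles; not part of the stub itself):
//
//   int64_t truncated = static_cast<int64_t>(value);   // truncate toward zero
//   int32_t result = static_cast<int32_t>(truncated);  // keep the low 32 bits
//
// with a jump to conversion_failure whenever the exponent is too large for
// the path in use (>= 63 with SSE3, > 31 without).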
8354 void IntegerConvert(MacroAssembler* masm,
8355 Register source,
8356 bool use_sse3,
8357 Label* conversion_failure) {
8358 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
8359 Label done, right_exponent, normal_exponent;
8360 Register scratch = ebx;
8361 Register scratch2 = edi;
8362 // Get exponent word.
8363 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
8364 // Get exponent alone in scratch2.
8365 __ mov(scratch2, scratch);
8366 __ and_(scratch2, HeapNumber::kExponentMask);
8367 if (use_sse3) {
8368 CpuFeatures::Scope scope(SSE3);
8369 // Check whether the exponent is too big for a 64 bit signed integer.
8370 static const uint32_t kTooBigExponent =
8371 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
8372 __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
8373 __ j(greater_equal, conversion_failure);
8374 // Load x87 register with heap number.
8375 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
8376 // Reserve space for 64 bit answer.
8377 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
8378 // Do conversion, which cannot fail because we checked the exponent.
8379 __ fisttp_d(Operand(esp, 0));
8380 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
8381 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
8382 } else {
8383 // Load ecx with zero. We use this either for the final shift or
8384 // for the answer.
8385 __ xor_(ecx, Operand(ecx));
8386 // Check whether the exponent matches a 32 bit signed int that cannot be
8387 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
8388 // exponent is 30 (biased). This is the exponent that we are fastest at and
8389 // also the highest exponent we can handle here.
8390 const uint32_t non_smi_exponent =
8391 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
8392 __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
8393 // If we have a match of the int32-but-not-Smi exponent then skip some
8394 // logic.
8395 __ j(equal, &right_exponent);
8396 // If the exponent is higher than that then go to slow case. This catches
8397 // numbers that don't fit in a signed int32, infinities and NaNs.
8398 __ j(less, &normal_exponent);
8399
8400 {
8401 // Handle a big exponent. The only reason we have this code is that the
8402 // >>> operator has a tendency to generate numbers with an exponent of 31.
8403 const uint32_t big_non_smi_exponent =
8404 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
8405 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
8406 __ j(not_equal, conversion_failure);
8407 // We have the big exponent, typically from >>>. This means the number is
8408 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
8409 __ mov(scratch2, scratch);
8410 __ and_(scratch2, HeapNumber::kMantissaMask);
8411 // Put back the implicit 1.
8412 __ or_(scratch2, 1 << HeapNumber::kExponentShift);
8413 // Shift up the mantissa bits to take up the space the exponent used to
8414 // take. We just orred in the implicit bit so that took care of one and
8415 // we want to use the full unsigned range so we subtract 1 bit from the
8416 // shift distance.
8417 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
8418 __ shl(scratch2, big_shift_distance);
8419 // Get the second half of the double.
8420 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
8421 // Shift down 21 bits to get the most significant 11 bits of the low
8422 // mantissa word.
8423 __ shr(ecx, 32 - big_shift_distance);
8424 __ or_(ecx, Operand(scratch2));
8425 // We have the answer in ecx, but we may need to negate it.
8426 __ test(scratch, Operand(scratch));
8427 __ j(positive, &done);
8428 __ neg(ecx);
8429 __ jmp(&done);
8430 }
8431
8432 __ bind(&normal_exponent);
8433 // Exponent word in scratch, exponent part of exponent word in scratch2.
8434 // Zero in ecx.
8435 // We know the exponent is smaller than 30 (biased). If it is less than
8436 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
8437 // it rounds to zero.
8438 const uint32_t zero_exponent =
8439 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
8440 __ sub(Operand(scratch2), Immediate(zero_exponent));
8441 // ecx already has a Smi zero.
8442 __ j(less, &done);
8443
8444 // We have a shifted exponent between 0 and 30 in scratch2.
8445 __ shr(scratch2, HeapNumber::kExponentShift);
8446 __ mov(ecx, Immediate(30));
8447 __ sub(ecx, Operand(scratch2));
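    // ecx now holds 30 - exponent: once the mantissa is assembled below as if
    // the exponent were 30, this is the extra right shift that scales it down
    // to the actual value.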
8448
8449 __ bind(&right_exponent);
8450 // Here ecx is the shift, scratch is the exponent word.
8451 // Get the top bits of the mantissa.
8452 __ and_(scratch, HeapNumber::kMantissaMask);
8453 // Put back the implicit 1.
8454 __ or_(scratch, 1 << HeapNumber::kExponentShift);
8455 // Shift up the mantissa bits to take up the space the exponent used to
8456 // take. We have kExponentShift + 1 significant bits in the low end of the
8457 // word. Shift them to the top bits.
8458 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
8459 __ shl(scratch, shift_distance);
8460 // Get the second half of the double. For some exponents we don't
8461 // actually need this because the bits get shifted out again, but
8462 // it's probably slower to test than just to do it.
8463 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
8464 // Shift down 22 bits to get the most significant 10 bits of the low
8465 // mantissa word.
8466 __ shr(scratch2, 32 - shift_distance);
8467 __ or_(scratch2, Operand(scratch));
8468 // Move down according to the exponent.
8469 __ shr_cl(scratch2);
8470 // Now the unsigned answer is in scratch2. We need to move it to ecx and
8471 // we may need to fix the sign.
8472 Label negative;
8473 __ xor_(ecx, Operand(ecx));
8474 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
8475 __ j(greater, &negative);
8476 __ mov(ecx, scratch2);
8477 __ jmp(&done);
8478 __ bind(&negative);
8479 __ sub(ecx, Operand(scratch2));
8480 __ bind(&done);
8481 }
8482 }
8483
8484
8485 // Input: edx, eax are the left and right objects of a bit op.
8486 // Output: eax, ecx are left and right integers for a bit op.
8487 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
8488 bool use_sse3,
8489 Label* conversion_failure) {
8490 // Check float operands.
8491 Label arg1_is_object, check_undefined_arg1;
8492 Label arg2_is_object, check_undefined_arg2;
8493 Label load_arg2, done;
8494
8495 __ test(edx, Immediate(kSmiTagMask));
8496 __ j(not_zero, &arg1_is_object);
8497 __ SmiUntag(edx);
8498 __ jmp(&load_arg2);
8499
8500 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
8501 __ bind(&check_undefined_arg1);
8502 __ cmp(edx, Factory::undefined_value());
8503 __ j(not_equal, conversion_failure);
8504 __ mov(edx, Immediate(0));
8505 __ jmp(&load_arg2);
8506
8507 __ bind(&arg1_is_object);
8508 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
8509 __ cmp(ebx, Factory::heap_number_map());
8510 __ j(not_equal, &check_undefined_arg1);
8511 // Get the untagged integer version of the edx heap number in ecx.
8512 IntegerConvert(masm, edx, use_sse3, conversion_failure);
8513 __ mov(edx, ecx);
8514
8515 // Here edx has the untagged integer, eax has a Smi or a heap number.
8516 __ bind(&load_arg2);
8517 // Test if arg2 is a Smi.
8518 __ test(eax, Immediate(kSmiTagMask));
8519 __ j(not_zero, &arg2_is_object);
8520 __ SmiUntag(eax);
8521 __ mov(ecx, eax);
8522 __ jmp(&done);
8523
8524 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
8525 __ bind(&check_undefined_arg2);
8526 __ cmp(eax, Factory::undefined_value());
8527 __ j(not_equal, conversion_failure);
8528 __ mov(ecx, Immediate(0));
8529 __ jmp(&done);
8530
8531 __ bind(&arg2_is_object);
8532 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
8533 __ cmp(ebx, Factory::heap_number_map());
8534 __ j(not_equal, &check_undefined_arg2);
8535 // Get the untagged integer version of the eax heap number in ecx.
8536 IntegerConvert(masm, eax, use_sse3, conversion_failure);
8537 __ bind(&done);
8538 __ mov(eax, edx);
8539 }
8540
8541
8542 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
8543 Register number) {
8544 Label load_smi, done;
8545
8546 __ test(number, Immediate(kSmiTagMask));
8547 __ j(zero, &load_smi, not_taken);
8548 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
8549 __ jmp(&done);
8550
8551 __ bind(&load_smi);
8552 __ SmiUntag(number);
8553 __ push(number);
8554 __ fild_s(Operand(esp, 0));
8555 __ pop(number);
8556
8557 __ bind(&done);
8558 }
8559
8560
8561 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
8562 Label load_smi_edx, load_eax, load_smi_eax, done;
8563 // Load operand in edx into xmm0.
8564 __ test(edx, Immediate(kSmiTagMask));
8565 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
8566 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
8567
8568 __ bind(&load_eax);
8569 // Load operand in eax into xmm1.
8570 __ test(eax, Immediate(kSmiTagMask));
8571 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
8572 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
8573 __ jmp(&done);
8574
8575 __ bind(&load_smi_edx);
8576 __ SmiUntag(edx); // Untag smi before converting to float.
8577 __ cvtsi2sd(xmm0, Operand(edx));
8578 __ SmiTag(edx); // Retag smi for heap number overwriting test.
8579 __ jmp(&load_eax);
8580
8581 __ bind(&load_smi_eax);
8582 __ SmiUntag(eax); // Untag smi before converting to float.
8583 __ cvtsi2sd(xmm1, Operand(eax));
8584 __ SmiTag(eax); // Retag smi for heap number overwriting test.
8585
8586 __ bind(&done);
8587 }
8588
8589
8590 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
8591 Label* not_numbers) {
8592 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
8593 // Load operand in edx into xmm0, or branch to not_numbers.
8594 __ test(edx, Immediate(kSmiTagMask));
8595 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
8596 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
8597 __ j(not_equal, not_numbers); // Argument in edx is not a number.
8598 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
8599 __ bind(&load_eax);
8600 // Load operand in eax into xmm1, or branch to not_numbers.
8601 __ test(eax, Immediate(kSmiTagMask));
8602 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
8603 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
8604 __ j(equal, &load_float_eax);
8605 __ jmp(not_numbers); // Argument in eax is not a number.
8606 __ bind(&load_smi_edx);
8607 __ SmiUntag(edx); // Untag smi before converting to float.
8608 __ cvtsi2sd(xmm0, Operand(edx));
8609 __ SmiTag(edx); // Retag smi for heap number overwriting test.
8610 __ jmp(&load_eax);
8611 __ bind(&load_smi_eax);
8612 __ SmiUntag(eax); // Untag smi before converting to float.
8613 __ cvtsi2sd(xmm1, Operand(eax));
8614 __ SmiTag(eax); // Retag smi for heap number overwriting test.
8615 __ jmp(&done);
8616 __ bind(&load_float_eax);
8617 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
8618 __ bind(&done);
8619 }
8620
8621
8622 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
8623 Register scratch) {
8624 const Register left = edx;
8625 const Register right = eax;
8626 __ mov(scratch, left);
8627 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
8628 __ SmiUntag(scratch);
8629 __ cvtsi2sd(xmm0, Operand(scratch));
8630
8631 __ mov(scratch, right);
8632 __ SmiUntag(scratch);
8633 __ cvtsi2sd(xmm1, Operand(scratch));
8634 }
8635
8636
8637 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
8638 Register scratch,
8639 ArgLocation arg_location) {
8640 Label load_smi_1, load_smi_2, done_load_1, done;
8641 if (arg_location == ARGS_IN_REGISTERS) {
8642 __ mov(scratch, edx);
8643 } else {
8644 __ mov(scratch, Operand(esp, 2 * kPointerSize));
8645 }
8646 __ test(scratch, Immediate(kSmiTagMask));
8647 __ j(zero, &load_smi_1, not_taken);
8648 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
8649 __ bind(&done_load_1);
8650
8651 if (arg_location == ARGS_IN_REGISTERS) {
8652 __ mov(scratch, eax);
8653 } else {
8654 __ mov(scratch, Operand(esp, 1 * kPointerSize));
8655 }
8656 __ test(scratch, Immediate(kSmiTagMask));
8657 __ j(zero, &load_smi_2, not_taken);
8658 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
8659 __ jmp(&done);
8660
8661 __ bind(&load_smi_1);
8662 __ SmiUntag(scratch);
8663 __ push(scratch);
8664 __ fild_s(Operand(esp, 0));
8665 __ pop(scratch);
8666 __ jmp(&done_load_1);
8667
8668 __ bind(&load_smi_2);
8669 __ SmiUntag(scratch);
8670 __ push(scratch);
8671 __ fild_s(Operand(esp, 0));
8672 __ pop(scratch);
8673
8674 __ bind(&done);
8675 }
8676
8677
8678 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
8679 Register scratch) {
8680 const Register left = edx;
8681 const Register right = eax;
8682 __ mov(scratch, left);
8683 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
8684 __ SmiUntag(scratch);
8685 __ push(scratch);
8686 __ fild_s(Operand(esp, 0));
8687
8688 __ mov(scratch, right);
8689 __ SmiUntag(scratch);
8690 __ mov(Operand(esp, 0), scratch);
8691 __ fild_s(Operand(esp, 0));
8692 __ pop(scratch);
8693 }
8694
8695
8696 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
8697 Label* non_float,
8698 Register scratch) {
8699 Label test_other, done;
8700 // Test if both operands are floats or smi -> scratch=k_is_float;
8701 // Otherwise scratch = k_not_float.
8702 __ test(edx, Immediate(kSmiTagMask));
8703 __ j(zero, &test_other, not_taken); // argument in edx is OK
8704 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
8705 __ cmp(scratch, Factory::heap_number_map());
8706 __ j(not_equal, non_float); // argument in edx is not a number -> NaN
8707
8708 __ bind(&test_other);
8709 __ test(eax, Immediate(kSmiTagMask));
8710 __ j(zero, &done); // argument in eax is OK
8711 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
8712 __ cmp(scratch, Factory::heap_number_map());
8713 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
8714
8715 // Fall-through: Both operands are numbers.
8716 __ bind(&done);
8717 }
8718
8719
8720 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
8721 Label slow, done;
8722
8723 if (op_ == Token::SUB) {
8724 // Check whether the value is a smi.
8725 Label try_float;
8726 __ test(eax, Immediate(kSmiTagMask));
8727 __ j(not_zero, &try_float, not_taken);
8728
8729 // Go slow case if the value of the expression is zero
8730 // to make sure that we switch between 0 and -0.
8731 __ test(eax, Operand(eax));
8732 __ j(zero, &slow, not_taken);
8733
8734 // The value of the expression is a smi that is not zero. Try
8735 // optimistic subtraction '0 - value'.
8736 Label undo;
8737 __ mov(edx, Operand(eax));
8738 __ Set(eax, Immediate(0));
8739 __ sub(eax, Operand(edx));
8740 __ j(overflow, &undo, not_taken);
8741
8742 // If result is a smi we are done.
8743 __ test(eax, Immediate(kSmiTagMask));
8744 __ j(zero, &done, taken);
8745
8746 // Restore eax and go slow case.
8747 __ bind(&undo);
8748 __ mov(eax, Operand(edx));
8749 __ jmp(&slow);
8750
8751 // Try floating point case.
8752 __ bind(&try_float);
8753 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
8754 __ cmp(edx, Factory::heap_number_map());
8755 __ j(not_equal, &slow);
8756 if (overwrite_) {
8757 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
8758 __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
8759 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
8760 } else {
8761 __ mov(edx, Operand(eax));
8762 // edx: operand
8763 __ AllocateHeapNumber(eax, ebx, ecx, &undo);
8764 // eax: allocated 'empty' number
8765 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
8766 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
8767 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
8768 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
8769 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
8770 }
8771 } else if (op_ == Token::BIT_NOT) {
8772 // Check if the operand is a heap number.
8773 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
8774 __ cmp(edx, Factory::heap_number_map());
8775 __ j(not_equal, &slow, not_taken);
8776
8777 // Convert the heap number in eax to an untagged integer in ecx.
8778 IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
8779
8780 // Do the bitwise operation and check if the result fits in a smi.
8781 Label try_float;
8782 __ not_(ecx);
8783 __ cmp(ecx, 0xc0000000);
8784 __ j(sign, &try_float, not_taken);
8785
8786 // Tag the result as a smi and we're done.
8787 ASSERT(kSmiTagSize == 1);
8788 __ lea(eax, Operand(ecx, times_2, kSmiTag));
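    // The lea computes ecx * 2 + kSmiTag (which is 0), tagging the value
    // without touching the flags.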
8789 __ jmp(&done);
8790
8791 // Try to store the result in a heap number.
8792 __ bind(&try_float);
8793 if (!overwrite_) {
8794 // Allocate a fresh heap number, but don't overwrite eax until
8795 // we're sure we can do it without going through the slow case
8796 // that needs the value in eax.
8797 __ AllocateHeapNumber(ebx, edx, edi, &slow);
8798 __ mov(eax, Operand(ebx));
8799 }
8800 if (CpuFeatures::IsSupported(SSE2)) {
8801 CpuFeatures::Scope use_sse2(SSE2);
8802 __ cvtsi2sd(xmm0, Operand(ecx));
8803 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
8804 } else {
8805 __ push(ecx);
8806 __ fild_s(Operand(esp, 0));
8807 __ pop(ecx);
8808 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
8809 }
8810 } else {
8811 UNIMPLEMENTED();
8812 }
8813
8814 // Return from the stub.
8815 __ bind(&done);
8816 __ StubReturn(1);
8817
8818 // Handle the slow case by jumping to the JavaScript builtin.
8819 __ bind(&slow);
8820 __ pop(ecx); // pop return address.
8821 __ push(eax);
8822 __ push(ecx); // push return address
8823 switch (op_) {
8824 case Token::SUB:
8825 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
8826 break;
8827 case Token::BIT_NOT:
8828 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
8829 break;
8830 default:
8831 UNREACHABLE();
8832 }
8833 }
8834
8835
8836 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
8837 // Check if the calling frame is an arguments adaptor frame.
8838 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
8839 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
8840 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
8841
8842 // Arguments adaptor case: Read the arguments length from the
8843 // adaptor frame and return it.
8844 // Otherwise nothing to do: The number of formal parameters has already been
8845 // passed in register eax by the calling function. Just return it.
8846 if (CpuFeatures::IsSupported(CMOV)) {
8847 CpuFeatures::Scope use_cmov(CMOV);
8848 __ cmov(equal, eax,
8849 Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
8850 } else {
8851 Label exit;
8852 __ j(not_equal, &exit);
8853 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
8854 __ bind(&exit);
8855 }
8856 __ ret(0);
8857 }
8858
8859
8860 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
8861 // The key is in edx and the parameter count is in eax.
8862
8863 // The displacement is used for skipping the frame pointer on the
8864 // stack. It is the offset of the last parameter (if any) relative
8865 // to the frame pointer.
8866 static const int kDisplacement = 1 * kPointerSize;
8867
8868 // Check that the key is a smi.
8869 Label slow;
8870 __ test(edx, Immediate(kSmiTagMask));
8871 __ j(not_zero, &slow, not_taken);
8872
8873 // Check if the calling frame is an arguments adaptor frame.
8874 Label adaptor;
8875 __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
8876 __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
8877 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
8878 __ j(equal, &adaptor);
8879
8880 // Check index against formal parameters count limit passed in
8881 // through register eax. Use unsigned comparison to get negative
8882 // check for free.
8883 __ cmp(edx, Operand(eax));
8884 __ j(above_equal, &slow, not_taken);
8885
8886 // Read the argument from the stack and return it.
8887 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
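  // eax (parameter count) and edx (key) are smis, i.e. value << 1, so the
  // times_2 scaling below yields value * kPointerSize without untagging.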
8888 __ lea(ebx, Operand(ebp, eax, times_2, 0));
8889 __ neg(edx);
8890 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
8891 __ ret(0);
8892
8893 // Arguments adaptor case: Check index against actual arguments
8894 // limit found in the arguments adaptor frame. Use unsigned
8895 // comparison to get negative check for free.
8896 __ bind(&adaptor);
8897 __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
8898 __ cmp(edx, Operand(ecx));
8899 __ j(above_equal, &slow, not_taken);
8900
8901 // Read the argument from the stack and return it.
8902 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
8903 __ lea(ebx, Operand(ebx, ecx, times_2, 0));
8904 __ neg(edx);
8905 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
8906 __ ret(0);
8907
8908 // Slow-case: Handle non-smi or out-of-bounds access to arguments
8909 // by calling the runtime system.
8910 __ bind(&slow);
8911 __ pop(ebx); // Return address.
8912 __ push(edx);
8913 __ push(ebx);
8914 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
8915 }
8916
8917
8918 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
8919 // esp[0] : return address
8920 // esp[4] : number of parameters
8921 // esp[8] : receiver displacement
8922 // esp[12] : function
8923
8924 // The displacement is used for skipping the return address and the
8925 // frame pointer on the stack. It is the offset of the last
8926 // parameter (if any) relative to the frame pointer.
8927 static const int kDisplacement = 2 * kPointerSize;
8928
8929 // Check if the calling frame is an arguments adaptor frame.
8930 Label adaptor_frame, try_allocate, runtime;
8931 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
8932 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
8933 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
8934 __ j(equal, &adaptor_frame);
8935
8936 // Get the length from the frame.
8937 __ mov(ecx, Operand(esp, 1 * kPointerSize));
8938 __ jmp(&try_allocate);
8939
8940 // Patch the arguments.length and the parameters pointer.
8941 __ bind(&adaptor_frame);
8942 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
8943 __ mov(Operand(esp, 1 * kPointerSize), ecx);
8944 __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
8945 __ mov(Operand(esp, 2 * kPointerSize), edx);
8946
8947 // Try the new space allocation. Start out with computing the size of
8948 // the arguments object and the elements array.
8949 Label add_arguments_object;
8950 __ bind(&try_allocate);
8951 __ test(ecx, Operand(ecx));
8952 __ j(zero, &add_arguments_object);
8953 __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
8954 __ bind(&add_arguments_object);
8955 __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
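  // ecx held the length as a smi (value << 1), so the times_2 scaling above
  // yields length * kPointerSize; together with the fixed array header and
  // the arguments object size this is the total allocation in bytes.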
8956
8957 // Do the allocation of both objects in one go.
8958 __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
8959
8960 // Get the arguments boilerplate from the current (global) context.
8961 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
8962 __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
8963 __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
8964 __ mov(edi, Operand(edi, offset));
8965
8966 // Copy the JS object part.
8967 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
8968 __ mov(ebx, FieldOperand(edi, i));
8969 __ mov(FieldOperand(eax, i), ebx);
8970 }
8971
8972 // Setup the callee in-object property.
8973 ASSERT(Heap::arguments_callee_index == 0);
8974 __ mov(ebx, Operand(esp, 3 * kPointerSize));
8975 __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
8976
8977 // Get the length (smi tagged) and set that as an in-object property too.
8978 ASSERT(Heap::arguments_length_index == 1);
8979 __ mov(ecx, Operand(esp, 1 * kPointerSize));
8980 __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
8981
8982 // If there are no actual arguments, we're done.
8983 Label done;
8984 __ test(ecx, Operand(ecx));
8985 __ j(zero, &done);
8986
8987 // Get the parameters pointer from the stack and untag the length.
8988 __ mov(edx, Operand(esp, 2 * kPointerSize));
8989 __ SmiUntag(ecx);
8990
8991 // Setup the elements pointer in the allocated arguments object and
8992 // initialize the header in the elements fixed array.
8993 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
8994 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
8995 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
8996 Immediate(Factory::fixed_array_map()));
8997 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
8998
8999 // Copy the fixed array slots.
9000 Label loop;
9001 __ bind(&loop);
9002 __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
9003 __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
9004 __ add(Operand(edi), Immediate(kPointerSize));
9005 __ sub(Operand(edx), Immediate(kPointerSize));
9006 __ dec(ecx);
9007 __ j(not_zero, &loop);
9008
9009 // Return and remove the on-stack parameters.
9010 __ bind(&done);
9011 __ ret(3 * kPointerSize);
9012
9013 // Do the runtime call to allocate the arguments object.
9014 __ bind(&runtime);
9015 __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
9016 }
9017
9018
9019 void RegExpExecStub::Generate(MacroAssembler* masm) {
9020 // Just jump directly to the runtime system if native RegExp is not selected
9021 // at compile time, or if entering generated regexp code is turned off by the
9022 // runtime flag.
9023 #ifndef V8_NATIVE_REGEXP
9024 __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
9025 #else // V8_NATIVE_REGEXP
9026 if (!FLAG_regexp_entry_native) {
9027 __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
9028 return;
9029 }
9030
9031 // Stack frame on entry.
9032 // esp[0]: return address
9033 // esp[4]: last_match_info (expected JSArray)
9034 // esp[8]: previous index
9035 // esp[12]: subject string
9036 // esp[16]: JSRegExp object
9037
9038 static const int kLastMatchInfoOffset = 1 * kPointerSize;
9039 static const int kPreviousIndexOffset = 2 * kPointerSize;
9040 static const int kSubjectOffset = 3 * kPointerSize;
9041 static const int kJSRegExpOffset = 4 * kPointerSize;
9042
9043 Label runtime, invoke_regexp;
9044
9045 // Ensure that a RegExp stack is allocated.
9046 ExternalReference address_of_regexp_stack_memory_address =
9047 ExternalReference::address_of_regexp_stack_memory_address();
9048 ExternalReference address_of_regexp_stack_memory_size =
9049 ExternalReference::address_of_regexp_stack_memory_size();
9050 __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
9051 __ test(ebx, Operand(ebx));
9052 __ j(zero, &runtime, not_taken);
9053
9054 // Check that the first argument is a JSRegExp object.
9055 __ mov(eax, Operand(esp, kJSRegExpOffset));
9056 ASSERT_EQ(0, kSmiTag);
9057 __ test(eax, Immediate(kSmiTagMask));
9058 __ j(zero, &runtime);
9059 __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
9060 __ j(not_equal, &runtime);
9061 // Check that the RegExp has been compiled (data contains a fixed array).
9062 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
9063 if (FLAG_debug_code) {
9064 __ test(ecx, Immediate(kSmiTagMask));
9065 __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
9066 __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
9067 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
9068 }
9069
9070 // ecx: RegExp data (FixedArray)
9071 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
9072 __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
9073 __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
9074 __ j(not_equal, &runtime);
9075
9076 // ecx: RegExp data (FixedArray)
9077 // Check that the number of captures fit in the static offsets vector buffer.
9078 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
9079 // Calculate number of capture registers (number_of_captures + 1) * 2. This
9080 // uses the assumption that smis are 2 * their untagged value.
9081 ASSERT_EQ(0, kSmiTag);
9082 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9083 __ add(Operand(edx), Immediate(2)); // edx was a smi.
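  // A smi is the value shifted left by one, so smi(n) + 2 == 2 * (n + 1),
  // which is exactly the untagged capture register count.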
9084 // Check that the static offsets vector buffer is large enough.
9085 __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
9086 __ j(above, &runtime);
9087
9088 // ecx: RegExp data (FixedArray)
9089 // edx: Number of capture registers
9090 // Check that the second argument is a string.
9091 __ mov(eax, Operand(esp, kSubjectOffset));
9092 __ test(eax, Immediate(kSmiTagMask));
9093 __ j(zero, &runtime);
9094 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
9095 __ j(NegateCondition(is_string), &runtime);
9096 // Get the length of the string to ebx.
9097 __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
9098
9099 // ebx: Length of subject string
9100 // ecx: RegExp data (FixedArray)
9101 // edx: Number of capture registers
9103 // Check that the third argument is a positive smi less than the subject
9104 // string length. A negative value will be greater (unsigned comparison).
9105 __ mov(eax, Operand(esp, kPreviousIndexOffset));
9106 __ SmiUntag(eax);
9107 __ cmp(eax, Operand(ebx));
9108 __ j(above, &runtime);
9109
9110 // ecx: RegExp data (FixedArray)
9111 // edx: Number of capture registers
9112 // Check that the fourth object is a JSArray object.
9113 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
9114 __ test(eax, Immediate(kSmiTagMask));
9115 __ j(zero, &runtime);
9116 __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
9117 __ j(not_equal, &runtime);
9118 // Check that the JSArray is in fast case.
9119 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
9120 __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
9121 __ cmp(eax, Factory::fixed_array_map());
9122 __ j(not_equal, &runtime);
9123 // Check that the last match info has space for the capture registers and the
9124 // additional information.
9125 __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
9126 __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
9127 __ cmp(edx, Operand(eax));
9128 __ j(greater, &runtime);
9129
9130 // ecx: RegExp data (FixedArray)
9131 // Check the representation and encoding of the subject string.
9132 Label seq_string, seq_two_byte_string, check_code;
9133 const int kStringRepresentationEncodingMask =
9134 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
9135 __ mov(eax, Operand(esp, kSubjectOffset));
9136 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
9137 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
9138 __ and_(ebx, kStringRepresentationEncodingMask);
9139 // First check for sequential string.
9140 ASSERT_EQ(0, kStringTag);
9141 ASSERT_EQ(0, kSeqStringTag);
9142 __ test(Operand(ebx),
9143 Immediate(kIsNotStringMask | kStringRepresentationMask));
9144 __ j(zero, &seq_string);
9145
9146 // Check for flat cons string.
9147 // A flat cons string is a cons string where the second part is the empty
9148 // string. In that case the subject string is just the first part of the cons
9149 // string. Also in this case the first part of the cons string is known to be
9150 // a sequential string or an external string.
9151 __ mov(edx, ebx);
9152 __ and_(edx, kStringRepresentationMask);
9153 __ cmp(edx, kConsStringTag);
9154 __ j(not_equal, &runtime);
9155 __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
9156 __ cmp(Operand(edx), Factory::empty_string());
9157 __ j(not_equal, &runtime);
9158 __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
9159 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
9160 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
9161 ASSERT_EQ(0, kSeqStringTag);
9162 __ test(ebx, Immediate(kStringRepresentationMask));
9163 __ j(not_zero, &runtime);
9164 __ and_(ebx, kStringRepresentationEncodingMask);
9165
9166 __ bind(&seq_string);
9167 // eax: subject string (sequential either ascii or two byte)
9168 // ebx: subject string type & kStringRepresentationEncodingMask
9169 // ecx: RegExp data (FixedArray)
9170 // Check that the irregexp code has been generated for an ascii string. If
9171 // it has, the field contains a code object; otherwise it contains the hole.
9172 __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
9173 __ j(equal, &seq_two_byte_string);
9174 if (FLAG_debug_code) {
9175 __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
9176 __ Check(equal, "Expected sequential ascii string");
9177 }
9178 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
9179 __ Set(edi, Immediate(1)); // Type is ascii.
9180 __ jmp(&check_code);
9181
9182 __ bind(&seq_two_byte_string);
9183 // eax: subject string
9184 // ecx: RegExp data (FixedArray)
9185 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
9186 __ Set(edi, Immediate(0)); // Type is two byte.
9187
9188 __ bind(&check_code);
9189 // Check that the irregexp code has been generated for the actual string
9190 // encoding. If it has, the field contains a code object; otherwise it
9191 // contains the hole.
9192 __ CmpObjectType(edx, CODE_TYPE, ebx);
9193 __ j(not_equal, &runtime);
9194
9195 // eax: subject string
9196 // edx: code
9197 // edi: encoding of subject string (1 if ascii, 0 if two_byte);
9198 // Load used arguments before starting to push arguments for call to native
9199 // RegExp code to avoid handling changing stack height.
9200 __ mov(ebx, Operand(esp, kPreviousIndexOffset));
9201 __ SmiUntag(ebx); // Previous index from smi.
9202
9203 // eax: subject string
9204 // ebx: previous index
9205 // edx: code
9206 // edi: encoding of subject string (1 if ascii 0 if two_byte);
9207 // All checks done. Now push arguments for native regexp code.
9208 __ IncrementCounter(&Counters::regexp_entry_native, 1);
9209
9210 // Argument 7: Indicate that this is a direct call from JavaScript.
9211 __ push(Immediate(1));
9212
9213 // Argument 6: Start (high end) of backtracking stack memory area.
9214 __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
9215 __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
9216 __ push(ecx);
9217
9218 // Argument 5: static offsets vector buffer.
9219 __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
9220
9221 // Argument 4: End of string data
9222 // Argument 3: Start of string data
9223 Label push_two_byte, push_rest;
9224 __ test(edi, Operand(edi));
9225 __ mov(edi, FieldOperand(eax, String::kLengthOffset));
9226 __ j(zero, &push_two_byte);
9227 __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
9228 __ push(ecx); // Argument 4.
9229 __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
9230 __ push(ecx); // Argument 3.
9231 __ jmp(&push_rest);
9232
9233 __ bind(&push_two_byte);
9234 __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
9235 __ push(ecx); // Argument 4.
9236 __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
9237 __ push(ecx); // Argument 3.
9238
9239 __ bind(&push_rest);
9240
9241 // Argument 2: Previous index.
9242 __ push(ebx);
9243
9244 // Argument 1: Subject string.
9245 __ push(eax);
9246
9247 // Locate the code entry and call it.
9248 __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
9249 __ call(Operand(edx));
9250 // Remove arguments.
9251 __ add(Operand(esp), Immediate(7 * kPointerSize));
9252
9253 // Check the result.
9254 Label success;
9255 __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
9256 __ j(equal, &success, taken);
9257 Label failure;
9258 __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
9259 __ j(equal, &failure, taken);
9260 __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
9261 // If it is not an exception, it can only be a retry. Handle that in the runtime system.
9262 __ j(not_equal, &runtime);
9263 // Result must now be exception. If there is no pending exception already, a
9264 // stack overflow (on the backtrack stack) was detected in RegExp code but
9265 // the exception has not been created yet. Handle that in the runtime system.
9266 // TODO(592) Rerunning the RegExp to get the stack overflow exception.
9267 ExternalReference pending_exception(Top::k_pending_exception_address);
9268 __ mov(eax,
9269 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
9270 __ cmp(eax, Operand::StaticVariable(pending_exception));
9271 __ j(equal, &runtime);
9272 __ bind(&failure);
9273 // For failure and exception return null.
9274 __ mov(Operand(eax), Factory::null_value());
9275 __ ret(4 * kPointerSize);
9276
9277 // Load RegExp data.
9278 __ bind(&success);
9279 __ mov(eax, Operand(esp, kJSRegExpOffset));
9280 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
9281 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
9282 // Calculate number of capture registers (number_of_captures + 1) * 2.
9283 ASSERT_EQ(0, kSmiTag);
9284 ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9285 __ add(Operand(edx), Immediate(2)); // edx was a smi.
9286
9287 // edx: Number of capture registers
9288 // Load last_match_info which is still known to be a fast case JSArray.
9289 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
9290 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
9291
9292 // ebx: last_match_info backing store (FixedArray)
9293 // edx: number of capture registers
9294 // Store the capture count.
9295 __ SmiTag(edx); // Number of capture registers to smi.
9296 __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
9297 __ SmiUntag(edx); // Number of capture registers back from smi.
9298 // Store last subject and last input.
9299 __ mov(eax, Operand(esp, kSubjectOffset));
9300 __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
9301 __ mov(ecx, ebx);
9302 __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
9303 __ mov(eax, Operand(esp, kSubjectOffset));
9304 __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
9305 __ mov(ecx, ebx);
9306 __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
9307
9308 // Get the static offsets vector filled by the native regexp code.
9309 ExternalReference address_of_static_offsets_vector =
9310 ExternalReference::address_of_static_offsets_vector();
9311 __ mov(ecx, Immediate(address_of_static_offsets_vector));
9312
9313 // ebx: last_match_info backing store (FixedArray)
9314 // ecx: offsets vector
9315 // edx: number of capture registers
9316 Label next_capture, done;
9317 __ mov(eax, Operand(esp, kPreviousIndexOffset));
9318 // Capture register counter starts from number of capture registers and
9319 // counts down until wrapping after zero.
9320 __ bind(&next_capture);
9321 __ sub(Operand(edx), Immediate(1));
9322 __ j(negative, &done);
9323 // Read the value from the static offsets vector buffer.
9324 __ mov(edi, Operand(ecx, edx, times_int_size, 0));
9325 // Perform explicit shift
9326 ASSERT_EQ(0, kSmiTag);
9327 __ shl(edi, kSmiTagSize);
9328 // Add previous index (from its stack slot) if value is not negative.
9329 Label capture_negative;
9330 // Carry flag set by shift above.
9331 __ j(negative, &capture_negative, not_taken);
9332 __ add(edi, Operand(eax)); // Add previous index (adding smi to smi).
9333 __ bind(&capture_negative);
9334 // Store the smi value in the last match info.
9335 __ mov(FieldOperand(ebx,
9336 edx,
9337 times_pointer_size,
9338 RegExpImpl::kFirstCaptureOffset),
9339 edi);
9340 __ jmp(&next_capture);
9341 __ bind(&done);
9342
9343 // Return last match info.
9344 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
9345 __ ret(4 * kPointerSize);
9346
9347 // Do the runtime call to execute the regexp.
9348 __ bind(&runtime);
9349 __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
9350 #endif // V8_NATIVE_REGEXP
9351 }
9352
9353
9354 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
9355 Register object,
9356 Register result,
9357 Register scratch1,
9358 Register scratch2,
9359 bool object_is_smi,
9360 Label* not_found) {
9361 // Currently only lookups for smis are supported. Check for a smi if the
9362 // object is not known to be one.
9363 if (!object_is_smi) {
9364 ASSERT(kSmiTag == 0);
9365 __ test(object, Immediate(kSmiTagMask));
9366 __ j(not_zero, not_found);
9367 }
9368
9369 // Use of registers. Register result is used as a temporary.
9370 Register number_string_cache = result;
9371 Register mask = scratch1;
9372 Register scratch = scratch2;
9373
9374 // Load the number string cache.
9375 ExternalReference roots_address = ExternalReference::roots_address();
9376 __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
9377 __ mov(number_string_cache,
9378 Operand::StaticArray(scratch, times_pointer_size, roots_address));
9379 // Make the hash mask from the length of the number string cache. It
9380 // contains two elements (number and string) for each cache entry.
9381 __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
9382 __ shr(mask, 1); // Divide length by two (length is not a smi).
9383 __ sub(Operand(mask), Immediate(1)); // Make mask.
9384 // Calculate the entry in the number string cache. The hash value in the
9385 // number string cache for smis is just the smi value.
9386 __ mov(scratch, object);
9387 __ SmiUntag(scratch);
9388 __ and_(scratch, Operand(mask));
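  // Each cache entry occupies two consecutive slots (number, string), which is
  // why the accesses below scale the index by times_twice_pointer_size.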
9389 // Check if the entry is the smi we are looking for.
9390 __ cmp(object,
9391 FieldOperand(number_string_cache,
9392 scratch,
9393 times_twice_pointer_size,
9394 FixedArray::kHeaderSize));
9395 __ j(not_equal, not_found);
9396
9397 // Get the result from the cache.
9398 __ mov(result,
9399 FieldOperand(number_string_cache,
9400 scratch,
9401 times_twice_pointer_size,
9402 FixedArray::kHeaderSize + kPointerSize));
9403 __ IncrementCounter(&Counters::number_to_string_native, 1);
9404 }
9405
9406
9407 void NumberToStringStub::Generate(MacroAssembler* masm) {
9408 Label runtime;
9409
9410 __ mov(ebx, Operand(esp, kPointerSize));
9411
9412 // Generate code to lookup number in the number string cache.
9413 GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
9414 __ ret(1 * kPointerSize);
9415
9416 __ bind(&runtime);
9417 // Handle number to string in the runtime system if not found in the cache.
9418 __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
9419 }
9420
9421
9422 void CompareStub::Generate(MacroAssembler* masm) {
9423 Label call_builtin, done;
9424
9425 // NOTICE! This code is only reached after a smi-fast-case check, so
9426 // it is certain that at least one operand isn't a smi.
9427
9428 if (cc_ == equal) { // Both strict and non-strict.
9429 Label slow; // Fallthrough label.
9430 // Equality is almost reflexive (everything but NaN), so start by testing
9431 // for "identity and not NaN".
9432 {
9433 Label not_identical;
9434 __ cmp(eax, Operand(edx));
9435 __ j(not_equal, &not_identical);
9436 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
9437 // so we do the second best thing - test it ourselves.
9438
9439 if (never_nan_nan_) {
9440 __ Set(eax, Immediate(0));
9441 __ ret(0);
9442 } else {
9443 Label return_equal;
9444 Label heap_number;
9445 // If it's not a heap number, then return equal.
9446 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
9447 Immediate(Factory::heap_number_map()));
9448 __ j(equal, &heap_number);
9449 __ bind(&return_equal);
9450 __ Set(eax, Immediate(0));
9451 __ ret(0);
9452
9453 __ bind(&heap_number);
9454 // It is a heap number, so return non-equal if it's NaN and equal if
9455 // it's not NaN.
9456 // The representation of NaN values has all exponent bits (52..62) set,
9457 // and not all mantissa bits (0..51) clear.
9458 // We only accept QNaNs, which have bit 51 set.
9459 // Read top bits of double representation (second word of value).
9460
9461 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
9462 // all bits in the mask are set. We only need to check the word
9463 // that contains the exponent and high bit of the mantissa.
9464 ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
9465 __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
9466 __ xor_(eax, Operand(eax));
9467 // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
9468 // bits.
9469 __ add(edx, Operand(edx));
9470 __ cmp(edx, kQuietNaNHighBitsMask << 1);
9471 __ setcc(above_equal, eax);
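  // The unsigned compare succeeds (above_equal) only when the exponent bits
  // and the quiet-NaN bit are all set, so setcc leaves eax as 1 (not equal)
  // for a quiet NaN and 0 (equal) otherwise.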
9472 __ ret(0);
9473 }
9474
9475 __ bind(&not_identical);
9476 }
9477
9478 // If we're doing a strict equality comparison, we don't have to do
9479 // type conversion, so we generate code to do fast comparison for objects
9480 // and oddballs. Non-smi numbers and strings still go through the usual
9481 // slow-case code.
9482 if (strict_) {
9483 // If either is a Smi (we know that not both are), then they can only
9484 // be equal if the other is a HeapNumber. If so, use the slow case.
9485 {
9486 Label not_smis;
9487 ASSERT_EQ(0, kSmiTag);
9488 ASSERT_EQ(0, Smi::FromInt(0));
9489 __ mov(ecx, Immediate(kSmiTagMask));
9490 __ and_(ecx, Operand(eax));
9491 __ test(ecx, Operand(edx));
9492 __ j(not_zero, &not_smis);
9493 // One operand is a smi.
9494
9495 // Check whether the non-smi is a heap number.
9496 ASSERT_EQ(1, kSmiTagMask);
9497 // ecx still holds eax & kSmiTagMask, which is either zero or one.
9498 __ sub(Operand(ecx), Immediate(0x01));
9499 __ mov(ebx, edx);
9500 __ xor_(ebx, Operand(eax));
9501 __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
9502 __ xor_(ebx, Operand(eax));
9503 // if eax was smi, ebx is now edx, else eax.
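  // This is a branch-free select: ecx is -1 when eax is a smi and 0 when it
  // is not, so masking (eax ^ edx) with ecx and xoring with eax leaves the
  // non-smi operand in ebx.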
9504
9505 // Check if the non-smi operand is a heap number.
9506 __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
9507 Immediate(Factory::heap_number_map()));
9508 // If heap number, handle it in the slow case.
9509 __ j(equal, &slow);
9510 // Return non-equal (ebx is not zero)
9511 __ mov(eax, ebx);
9512 __ ret(0);
9513
9514 __ bind(&not_smis);
9515 }
9516
9517 // If either operand is a JSObject or an oddball value, then they are not
9518 // equal since their pointers are different.
9519 // There is no test for undetectability in strict equality.
9520
9521 // Get the type of the first operand.
9522 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
9523 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
9524
9525 // If the first object is a JS object, we have done pointer comparison.
9526 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
9527 Label first_non_object;
9528 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
9529 __ j(less, &first_non_object);
9530
9531 // Return non-zero (eax is not zero)
9532 Label return_not_equal;
9533 ASSERT(kHeapObjectTag != 0);
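  // eax holds a tagged heap object pointer and the heap object tag is
  // non-zero (asserted above), so eax itself can never be zero here.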
9534 __ bind(&return_not_equal);
9535 __ ret(0);
9536
9537 __ bind(&first_non_object);
9538 // Check for oddballs: true, false, null, undefined.
9539 __ cmp(ecx, ODDBALL_TYPE);
9540 __ j(equal, &return_not_equal);
9541
9542 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
9543 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
9544
9545 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
9546 __ j(greater_equal, &return_not_equal);
9547
9548 // Check for oddballs: true, false, null, undefined.
9549 __ cmp(ecx, ODDBALL_TYPE);
9550 __ j(equal, &return_not_equal);
9551
9552 // Fall through to the general case.
9553 }
9554 __ bind(&slow);
9555 }
9556
9557 // Push arguments below the return address.
9558 __ pop(ecx);
9559 __ push(eax);
9560 __ push(edx);
9561 __ push(ecx);
9562
9563 // Inlined floating point compare.
9564 // Call builtin if operands are not floating point or smi.
9565 Label check_for_symbols;
9566 Label unordered;
9567 if (CpuFeatures::IsSupported(SSE2)) {
9568 CpuFeatures::Scope use_sse2(SSE2);
9569 CpuFeatures::Scope use_cmov(CMOV);
9570
9571 FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
9572 __ comisd(xmm0, xmm1);
9573
9574 // Jump to builtin for NaN.
9575 __ j(parity_even, &unordered, not_taken);
9576 __ mov(eax, 0); // equal
9577 __ mov(ecx, Immediate(Smi::FromInt(1)));
9578 __ cmov(above, eax, Operand(ecx));
9579 __ mov(ecx, Immediate(Smi::FromInt(-1)));
9580 __ cmov(below, eax, Operand(ecx));
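  // comisd set the flags like an unsigned compare, so the cmovs pick Smi(1)
  // for above (greater) and Smi(-1) for below (less), leaving 0 for equal.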
9581 __ ret(2 * kPointerSize);
9582 } else {
9583 FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
9584 FloatingPointHelper::LoadFloatOperands(masm, ecx);
9585 __ FCmp();
9586
9587 // Jump to builtin for NaN.
9588 __ j(parity_even, &unordered, not_taken);
9589
9590 Label below_lbl, above_lbl;
9591 // Return a result of -1, 0, or 1, to indicate result of comparison.
9592 __ j(below, &below_lbl, not_taken);
9593 __ j(above, &above_lbl, not_taken);
9594
9595 __ xor_(eax, Operand(eax)); // equal
9596 // Both arguments were pushed in case a runtime call was needed.
9597 __ ret(2 * kPointerSize);
9598
9599 __ bind(&below_lbl);
9600 __ mov(eax, Immediate(Smi::FromInt(-1)));
9601 __ ret(2 * kPointerSize);
9602
9603 __ bind(&above_lbl);
9604 __ mov(eax, Immediate(Smi::FromInt(1)));
9605 __ ret(2 * kPointerSize); // eax, edx were pushed
9606 }
9607 // If one of the numbers was NaN, then the result is always false.
9608 // The cc is never not-equal.
9609 __ bind(&unordered);
9610 ASSERT(cc_ != not_equal);
9611 if (cc_ == less || cc_ == less_equal) {
9612 __ mov(eax, Immediate(Smi::FromInt(1)));
9613 } else {
9614 __ mov(eax, Immediate(Smi::FromInt(-1)));
9615 }
9616 __ ret(2 * kPointerSize); // eax, edx were pushed
9617
9618 // Fast negative check for symbol-to-symbol equality.
9619 __ bind(&check_for_symbols);
9620 Label check_for_strings;
9621 if (cc_ == equal) {
9622 BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
9623 BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
9624
9625 // We've already checked for object identity, so if both operands
9626 // are symbols they aren't equal. Register eax already holds a
9627 // non-zero value, which indicates not equal, so just return.
9628 __ ret(2 * kPointerSize);
9629 }
9630
9631 __ bind(&check_for_strings);
9632
9633 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
9634
9635 // Inline comparison of ascii strings.
9636 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
9637 edx,
9638 eax,
9639 ecx,
9640 ebx,
9641 edi);
9642 #ifdef DEBUG
9643 __ Abort("Unexpected fall-through from string comparison");
9644 #endif
9645
9646 __ bind(&call_builtin);
9647 // Must swap the argument order.
9648 __ pop(ecx);
9649 __ pop(edx);
9650 __ pop(eax);
9651 __ push(edx);
9652 __ push(eax);
9653
9654 // Figure out which native to call and setup the arguments.
9655 Builtins::JavaScript builtin;
9656 if (cc_ == equal) {
9657 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
9658 } else {
9659 builtin = Builtins::COMPARE;
9660 int ncr; // NaN compare result
9661 if (cc_ == less || cc_ == less_equal) {
9662 ncr = GREATER;
9663 } else {
9664 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
9665 ncr = LESS;
9666 }
9667 __ push(Immediate(Smi::FromInt(ncr)));
9668 }
9669
9670 // Restore return address on the stack.
9671 __ push(ecx);
9672
9673 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
9674 // tagged as a small integer.
9675 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
9676 }
9677
9678
9679 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
9680 Label* label,
9681 Register object,
9682 Register scratch) {
9683 __ test(object, Immediate(kSmiTagMask));
9684 __ j(zero, label);
9685 __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
9686 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
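  // A symbol is a string (kIsNotStringMask bit clear, kStringTag is zero)
  // that also has the symbol bit set, so one masked compare covers both.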
9687 __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
9688 __ cmp(scratch, kSymbolTag | kStringTag);
9689 __ j(not_equal, label);
9690 }
9691
9692
9693 void StackCheckStub::Generate(MacroAssembler* masm) {
9694 // Because builtins always remove the receiver from the stack, we
9695 // have to fake one to avoid underflowing the stack. The receiver
9696 // must be inserted below the return address on the stack so we
9697 // temporarily store that in a register.
9698 __ pop(eax);
9699 __ push(Immediate(Smi::FromInt(0)));
9700 __ push(eax);
9701
9702 // Do tail-call to runtime routine.
9703 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
9704 }
9705
9706
9707 void CallFunctionStub::Generate(MacroAssembler* masm) {
9708 Label slow;
9709
9710 // If the receiver might be a value (string, number or boolean) check for this
9711 // and box it if it is.
9712 if (ReceiverMightBeValue()) {
9713 // Get the receiver from the stack.
9714 // +1 ~ return address
9715 Label receiver_is_value, receiver_is_js_object;
9716 __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
9717
9718 // Check if receiver is a smi (which is a number value).
9719 __ test(eax, Immediate(kSmiTagMask));
9720 __ j(zero, &receiver_is_value, not_taken);
9721
9722 // Check if the receiver is a valid JS object.
9723 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
9724 __ j(above_equal, &receiver_is_js_object);
9725
9726 // Call the runtime to box the value.
9727 __ bind(&receiver_is_value);
9728 __ EnterInternalFrame();
9729 __ push(eax);
9730 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
9731 __ LeaveInternalFrame();
9732 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
9733
9734 __ bind(&receiver_is_js_object);
9735 }
9736
9737 // Get the function to call from the stack.
9738 // +2 ~ receiver, return address
9739 __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
9740
9741 // Check that the function really is a JavaScript function.
9742 __ test(edi, Immediate(kSmiTagMask));
9743 __ j(zero, &slow, not_taken);
9744 // Goto slow case if we do not have a function.
9745 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
9746 __ j(not_equal, &slow, not_taken);
9747
9748 // Fast-case: Just invoke the function.
9749 ParameterCount actual(argc_);
9750 __ InvokeFunction(edi, actual, JUMP_FUNCTION);
9751
9752 // Slow-case: Non-function called.
9753 __ bind(&slow);
9754 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
9755 // of the original receiver from the call site).
9756 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
9757 __ Set(eax, Immediate(argc_));
9758 __ Set(ebx, Immediate(0));
9759 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
9760 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
9761 __ jmp(adaptor, RelocInfo::CODE_TARGET);
9762 }
9763
9764
9765 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
9766 // eax holds the exception.
9767
9768 // Adjust this code if not the case.
9769 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
9770
9771 // Drop the sp to the top of the handler.
9772 ExternalReference handler_address(Top::k_handler_address);
9773 __ mov(esp, Operand::StaticVariable(handler_address));
9774
9775 // Restore next handler and frame pointer, discard handler state.
9776 ASSERT(StackHandlerConstants::kNextOffset == 0);
9777 __ pop(Operand::StaticVariable(handler_address));
9778 ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
9779 __ pop(ebp);
9780 __ pop(edx); // Remove state.
9781
9782 // Before returning we restore the context from the frame pointer if
9783 // not NULL. The frame pointer is NULL in the exception handler of
9784 // a JS entry frame.
9785 __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
9786 Label skip;
9787 __ cmp(ebp, 0);
9788 __ j(equal, &skip, not_taken);
9789 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
9790 __ bind(&skip);
9791
9792 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
9793 __ ret(0);
9794 }
9795
9796
9797 // If true, a Handle<T> passed by value is passed and returned by
9798 // using the location_ field directly. If false, it is passed and
9799 // returned as a pointer to a handle.
9800 #ifdef USING_MAC_ABI
9801 static const bool kPassHandlesDirectly = true;
9802 #else
9803 static const bool kPassHandlesDirectly = false;
9804 #endif
9805
9806
9807 void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
9808 Label get_result;
9809 Label prologue;
9810 Label promote_scheduled_exception;
9811 __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
9812 ASSERT_EQ(kArgc, 4);
9813 if (kPassHandlesDirectly) {
9814 // When handles are passed directly we don't have to allocate extra
9815 // space for, and pass, an out parameter.
9816 __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
9817 __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
9818 } else {
9819 // The function expects three arguments to be passed but we allocate
9820 // four to get space for the output cell. The argument slots are filled
9821 // as follows:
9822 //
9823 // 3: output cell
9824 // 2: arguments pointer
9825 // 1: name
9826 // 0: pointer to the output cell
9827 //
9828 // Note that this is one more "argument" than the function expects
9829 // so the out cell will have to be popped explicitly after returning
9830 // from the function.
9831 __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
9832 __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
9833 __ mov(ebx, esp);
9834 __ add(Operand(ebx), Immediate(3 * kPointerSize));
9835 __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
9836 __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
9837 }
9838 // Call the api function!
9839 __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
9840 // Check if the function scheduled an exception.
9841 ExternalReference scheduled_exception_address =
9842 ExternalReference::scheduled_exception_address();
9843 __ cmp(Operand::StaticVariable(scheduled_exception_address),
9844 Immediate(Factory::the_hole_value()));
9845 __ j(not_equal, &promote_scheduled_exception, not_taken);
9846 if (!kPassHandlesDirectly) {
9847 // The returned value is a pointer to the handle holding the result.
9848 // Dereference this to get to the location.
9849 __ mov(eax, Operand(eax, 0));
9850 }
9851 // Check if the result handle holds 0
9852 __ test(eax, Operand(eax));
9853 __ j(not_zero, &get_result, taken);
9854 // It was zero; the result is undefined.
9855 __ mov(eax, Factory::undefined_value());
9856 __ jmp(&prologue);
9857 // It was non-zero. Dereference to get the result value.
9858 __ bind(&get_result);
9859 __ mov(eax, Operand(eax, 0));
9860 __ bind(&prologue);
9861 __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
9862 __ ret(0);
9863 __ bind(&promote_scheduled_exception);
9864 __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
9865 0,
9866 1);
9867 }
9868
9869
9870 void CEntryStub::GenerateCore(MacroAssembler* masm,
9871 Label* throw_normal_exception,
9872 Label* throw_termination_exception,
9873 Label* throw_out_of_memory_exception,
9874 bool do_gc,
9875 bool always_allocate_scope) {
9876 // eax: result parameter for PerformGC, if any
9877 // ebx: pointer to C function (C callee-saved)
9878 // ebp: frame pointer (restored after C call)
9879 // esp: stack pointer (restored after C call)
9880 // edi: number of arguments including receiver (C callee-saved)
9881 // esi: pointer to the first argument (C callee-saved)
9882
9883 // Result returned in eax, or eax+edx if result_size_ is 2.
9884
9885 if (do_gc) {
9886 __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
9887 __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
9888 }
9889
9890 ExternalReference scope_depth =
9891 ExternalReference::heap_always_allocate_scope_depth();
9892 if (always_allocate_scope) {
9893 __ inc(Operand::StaticVariable(scope_depth));
9894 }
9895
9896 // Call C function.
9897 __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
9898 __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
9899 __ call(Operand(ebx));
9900 // Result is in eax or edx:eax - do not destroy these registers!
9901
9902 if (always_allocate_scope) {
9903 __ dec(Operand::StaticVariable(scope_depth));
9904 }
9905
9906 // Make sure we're not trying to return 'the hole' from the runtime
9907 // call as this may lead to crashes in the IC code later.
9908 if (FLAG_debug_code) {
9909 Label okay;
9910 __ cmp(eax, Factory::the_hole_value());
9911 __ j(not_equal, &okay);
9912 __ int3();
9913 __ bind(&okay);
9914 }
9915
9916 // Check for failure result.
9917 Label failure_returned;
9918 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
9919 __ lea(ecx, Operand(eax, 1));
9920 // Lower 2 bits of ecx are 0 iff eax has failure tag.
9921 __ test(ecx, Immediate(kFailureTagMask));
9922 __ j(zero, &failure_returned, not_taken);
9923
9924 // Exit the JavaScript to C++ exit frame.
9925 __ LeaveExitFrame(mode_);
9926 __ ret(0);
9927
9928 // Handling of failure.
9929 __ bind(&failure_returned);
9930
9931 Label retry;
9932 // If the returned exception is RETRY_AFTER_GC continue at retry label
9933 ASSERT(Failure::RETRY_AFTER_GC == 0);
9934 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
9935 __ j(zero, &retry, taken);
9936
9937 // Special handling of out of memory exceptions.
9938 __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
9939 __ j(equal, throw_out_of_memory_exception);
9940
9941 // Retrieve the pending exception and clear the variable.
9942 ExternalReference pending_exception_address(Top::k_pending_exception_address);
9943 __ mov(eax, Operand::StaticVariable(pending_exception_address));
9944 __ mov(edx,
9945 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
9946 __ mov(Operand::StaticVariable(pending_exception_address), edx);
9947
9948 // Special handling of termination exceptions which are uncatchable
9949 // by javascript code.
9950 __ cmp(eax, Factory::termination_exception());
9951 __ j(equal, throw_termination_exception);
9952
9953 // Handle normal exception.
9954 __ jmp(throw_normal_exception);
9955
9956 // Retry.
9957 __ bind(&retry);
9958 }
9959
9960
9961 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
9962 UncatchableExceptionType type) {
9963 // Adjust this code if not the case.
9964 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
9965
9966 // Drop sp to the top stack handler.
9967 ExternalReference handler_address(Top::k_handler_address);
9968 __ mov(esp, Operand::StaticVariable(handler_address));
9969
9970 // Unwind the handlers until the ENTRY handler is found.
9971 Label loop, done;
9972 __ bind(&loop);
9973 // Load the type of the current stack handler.
9974 const int kStateOffset = StackHandlerConstants::kStateOffset;
9975 __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
9976 __ j(equal, &done);
9977 // Fetch the next handler in the list.
9978 const int kNextOffset = StackHandlerConstants::kNextOffset;
9979 __ mov(esp, Operand(esp, kNextOffset));
9980 __ jmp(&loop);
9981 __ bind(&done);
9982
9983 // Set the top handler address to next handler past the current ENTRY handler.
9984 ASSERT(StackHandlerConstants::kNextOffset == 0);
9985 __ pop(Operand::StaticVariable(handler_address));
9986
9987 if (type == OUT_OF_MEMORY) {
9988 // Set external caught exception to false.
9989 ExternalReference external_caught(Top::k_external_caught_exception_address);
9990 __ mov(eax, false);
9991 __ mov(Operand::StaticVariable(external_caught), eax);
9992
9993 // Set pending exception and eax to out of memory exception.
9994 ExternalReference pending_exception(Top::k_pending_exception_address);
9995 __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
9996 __ mov(Operand::StaticVariable(pending_exception), eax);
9997 }
9998
9999 // Clear the context pointer.
10000 __ xor_(esi, Operand(esi));
10001
10002 // Restore fp from handler and discard handler state.
10003 ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
10004 __ pop(ebp);
10005 __ pop(edx); // State.
10006
10007 ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
10008 __ ret(0);
10009 }
10010
10011
10012 void CEntryStub::Generate(MacroAssembler* masm) {
10013 // eax: number of arguments including receiver
10014 // ebx: pointer to C function (C callee-saved)
10015 // ebp: frame pointer (restored after C call)
10016 // esp: stack pointer (restored after C call)
10017 // esi: current context (C callee-saved)
10018 // edi: JS function of the caller (C callee-saved)
10019
10020 // NOTE: Invocations of builtins may return failure objects instead
10021 // of a proper result. The builtin entry handles this by performing
10022 // a garbage collection and retrying the builtin (twice).
10023
10024 // Enter the exit frame that transitions from JavaScript to C++.
10025 __ EnterExitFrame(mode_);
10026
10027 // eax: result parameter for PerformGC, if any (setup below)
10028 // ebx: pointer to builtin function (C callee-saved)
10029 // ebp: frame pointer (restored after C call)
10030 // esp: stack pointer (restored after C call)
10031 // edi: number of arguments including receiver (C callee-saved)
10032 // esi: argv pointer (C callee-saved)
10033
10034 Label throw_normal_exception;
10035 Label throw_termination_exception;
10036 Label throw_out_of_memory_exception;
10037
10038 // Call into the runtime system.
10039 GenerateCore(masm,
10040 &throw_normal_exception,
10041 &throw_termination_exception,
10042 &throw_out_of_memory_exception,
10043 false,
10044 false);
10045
10046 // Do space-specific GC and retry runtime call.
10047 GenerateCore(masm,
10048 &throw_normal_exception,
10049 &throw_termination_exception,
10050 &throw_out_of_memory_exception,
10051 true,
10052 false);
10053
10054 // Do full GC and retry runtime call one final time.
10055 Failure* failure = Failure::InternalError();
10056 __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
10057 GenerateCore(masm,
10058 &throw_normal_exception,
10059 &throw_termination_exception,
10060 &throw_out_of_memory_exception,
10061 true,
10062 true);
10063
10064 __ bind(&throw_out_of_memory_exception);
10065 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
10066
10067 __ bind(&throw_termination_exception);
10068 GenerateThrowUncatchable(masm, TERMINATION);
10069
10070 __ bind(&throw_normal_exception);
10071 GenerateThrowTOS(masm);
10072 }
10073
10074
10075 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
10076 Label invoke, exit;
10077 #ifdef ENABLE_LOGGING_AND_PROFILING
10078 Label not_outermost_js, not_outermost_js_2;
10079 #endif
10080
10081 // Setup frame.
10082 __ push(ebp);
10083 __ mov(ebp, Operand(esp));
10084
10085 // Push marker in two places.
10086 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
10087 __ push(Immediate(Smi::FromInt(marker))); // context slot
10088 __ push(Immediate(Smi::FromInt(marker))); // function slot
10089 // Save callee-saved registers (C calling conventions).
10090 __ push(edi);
10091 __ push(esi);
10092 __ push(ebx);
10093
10094 // Save copies of the top frame descriptor on the stack.
10095 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
10096 __ push(Operand::StaticVariable(c_entry_fp));
10097
10098 #ifdef ENABLE_LOGGING_AND_PROFILING
10099 // If this is the outermost JS call, set js_entry_sp value.
10100 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
10101 __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
10102 __ j(not_equal, &not_outermost_js);
10103 __ mov(Operand::StaticVariable(js_entry_sp), ebp);
10104 __ bind(&not_outermost_js);
10105 #endif
10106
10107 // Call a faked try-block that does the invoke.
10108 __ call(&invoke);
10109
10110 // Caught exception: Store result (exception) in the pending
10111 // exception field in the JSEnv and return a failure sentinel.
10112 ExternalReference pending_exception(Top::k_pending_exception_address);
10113 __ mov(Operand::StaticVariable(pending_exception), eax);
10114 __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
10115 __ jmp(&exit);
10116
10117 // Invoke: Link this frame into the handler chain.
10118 __ bind(&invoke);
10119 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
10120
10121 // Clear any pending exceptions.
10122 __ mov(edx,
10123 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
10124 __ mov(Operand::StaticVariable(pending_exception), edx);
10125
10126 // Fake a receiver (NULL).
10127 __ push(Immediate(0)); // receiver
10128
10129 // Invoke the function by calling through JS entry trampoline
10130 // builtin and pop the faked function when we return. Notice that we
10131 // cannot store a reference to the trampoline code directly in this
10132 // stub, because the builtin stubs may not have been generated yet.
10133 if (is_construct) {
10134 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
10135 __ mov(edx, Immediate(construct_entry));
10136 } else {
10137 ExternalReference entry(Builtins::JSEntryTrampoline);
10138 __ mov(edx, Immediate(entry));
10139 }
10140 __ mov(edx, Operand(edx, 0)); // deref address
10141 __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
10142 __ call(Operand(edx));
10143
10144 // Unlink this frame from the handler chain.
10145 __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
10146 // Pop next_sp.
10147 __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
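  // The next-handler word was already popped above, so only the remaining
  // fp, state and pc words of the handler need to be dropped here.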
10148
10149 #ifdef ENABLE_LOGGING_AND_PROFILING
10150 // If current EBP value is the same as js_entry_sp value, it means that
10151 // the current function is the outermost.
10152 __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
10153 __ j(not_equal, &not_outermost_js_2);
10154 __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
10155 __ bind(&not_outermost_js_2);
10156 #endif
10157
10158 // Restore the top frame descriptor from the stack.
10159 __ bind(&exit);
10160 __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
10161
10162 // Restore callee-saved registers (C calling conventions).
10163 __ pop(ebx);
10164 __ pop(esi);
10165 __ pop(edi);
10166 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
10167
10168 // Restore frame pointer and return.
10169 __ pop(ebp);
10170 __ ret(0);
10171 }
10172
10173
10174 void InstanceofStub::Generate(MacroAssembler* masm) {
10175 // Get the object - go slow case if it's a smi.
10176 Label slow;
10177 __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
10178 __ test(eax, Immediate(kSmiTagMask));
10179 __ j(zero, &slow, not_taken);
10180
10181 // Check that the left hand is a JS object.
10182 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
10183 __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
10184 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
10185 __ j(less, &slow, not_taken);
10186 __ cmp(ecx, LAST_JS_OBJECT_TYPE);
10187 __ j(greater, &slow, not_taken);
10188
10189 // Get the prototype of the function.
10190 __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
10191 __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
10192
10193 // Check that the function prototype is a JS object.
10194 __ test(ebx, Immediate(kSmiTagMask));
10195 __ j(zero, &slow, not_taken);
10196 __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
10197 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
10198 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
10199 __ j(less, &slow, not_taken);
10200 __ cmp(ecx, LAST_JS_OBJECT_TYPE);
10201 __ j(greater, &slow, not_taken);
10202
10203 // Register mapping: eax is object map and ebx is function prototype.
10204 __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
10205
10206 // Loop through the prototype chain looking for the function prototype.
10207 Label loop, is_instance, is_not_instance;
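  // Walk the prototype chain of the object: hitting the function prototype
  // means the object is an instance; reaching null means it is not.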
10208 __ bind(&loop);
10209 __ cmp(ecx, Operand(ebx));
10210 __ j(equal, &is_instance);
10211 __ cmp(Operand(ecx), Immediate(Factory::null_value()));
10212 __ j(equal, &is_not_instance);
10213 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
10214 __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
10215 __ jmp(&loop);
10216
10217 __ bind(&is_instance);
10218 __ Set(eax, Immediate(0));
10219 __ ret(2 * kPointerSize);
10220
10221 __ bind(&is_not_instance);
10222 __ Set(eax, Immediate(Smi::FromInt(1)));
10223 __ ret(2 * kPointerSize);
10224
10225 // Slow-case: Go through the JavaScript implementation.
10226 __ bind(&slow);
10227 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
10228 }
10229
10230
10231 // Unfortunately you have to run without snapshots to see most of these
10232 // names in the profile since most compare stubs end up in the snapshot.
10233 const char* CompareStub::GetName() {
10234 switch (cc_) {
10235 case less: return "CompareStub_LT";
10236 case greater: return "CompareStub_GT";
10237 case less_equal: return "CompareStub_LE";
10238 case greater_equal: return "CompareStub_GE";
10239 case not_equal: {
10240 if (strict_) {
10241 if (never_nan_nan_) {
10242 return "CompareStub_NE_STRICT_NO_NAN";
10243 } else {
10244 return "CompareStub_NE_STRICT";
10245 }
10246 } else {
10247 if (never_nan_nan_) {
10248 return "CompareStub_NE_NO_NAN";
10249 } else {
10250 return "CompareStub_NE";
10251 }
10252 }
10253 }
10254 case equal: {
10255 if (strict_) {
10256 if (never_nan_nan_) {
10257 return "CompareStub_EQ_STRICT_NO_NAN";
10258 } else {
10259 return "CompareStub_EQ_STRICT";
10260 }
10261 } else {
10262 if (never_nan_nan_) {
10263 return "CompareStub_EQ_NO_NAN";
10264 } else {
10265 return "CompareStub_EQ";
10266 }
10267 }
10268 }
10269 default: return "CompareStub";
10270 }
10271 }
10272
10273
10274 int CompareStub::MinorKey() {
10275 // Encode the three parameters in a unique 16 bit value.
10276 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
10277 int nnn_value = (never_nan_nan_ ? 2 : 0);
10278 if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs.
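  // Bit 0 encodes strict_, bit 1 encodes never_nan_nan_ (only kept for
  // equality to avoid duplicate stubs) and the condition code uses the rest.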
10279 return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
10280 }
10281
10282
10283 void StringAddStub::Generate(MacroAssembler* masm) {
10284 Label string_add_runtime;
10285
10286 // Load the two arguments.
10287 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
10288 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
10289
10290 // Make sure that both arguments are strings if not known in advance.
10291 if (string_check_) {
10292 __ test(eax, Immediate(kSmiTagMask));
10293 __ j(zero, &string_add_runtime);
10294 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
10295 __ j(above_equal, &string_add_runtime);
10296
10297 // First argument is a string, test second.
10298 __ test(edx, Immediate(kSmiTagMask));
10299 __ j(zero, &string_add_runtime);
10300 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
10301 __ j(above_equal, &string_add_runtime);
10302 }
10303
10304 // Both arguments are strings.
10305 // eax: first string
10306 // edx: second string
10307 // Check if either of the strings is empty. In that case return the other.
10308 Label second_not_zero_length, both_not_zero_length;
10309 __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
10310 __ test(ecx, Operand(ecx));
10311 __ j(not_zero, &second_not_zero_length);
10312 // Second string is empty, result is first string which is already in eax.
10313 __ IncrementCounter(&Counters::string_add_native, 1);
10314 __ ret(2 * kPointerSize);
10315 __ bind(&second_not_zero_length);
10316 __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
10317 __ test(ebx, Operand(ebx));
10318 __ j(not_zero, &both_not_zero_length);
10319 // First string is empty, result is second string which is in edx.
10320 __ mov(eax, edx);
10321 __ IncrementCounter(&Counters::string_add_native, 1);
10322 __ ret(2 * kPointerSize);
10323
10324 // Both strings are non-empty.
10325 // eax: first string
10326 // ebx: length of first string
10327 // ecx: length of second string
10328 // edx: second string
10329 // Look at the length of the result of adding the two strings.
10330 Label string_add_flat_result, longer_than_two;
10331 __ bind(&both_not_zero_length);
10332 __ add(ebx, Operand(ecx));
10333 // Adding two one-character strings is handled specially below: the combined
10334 // two-character string is looked up in the symbol table before allocating.
10335 __ cmp(ebx, 2);
10336 __ j(not_equal, &longer_than_two);
10337
10338 // Check that both strings are non-external ascii strings.
10339 __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
10340 &string_add_runtime);
10341
10342 // Get the two characters forming the new string.
10343 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
10344 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
10345
10346 // Try to lookup two character string in symbol table. If it is not found
10347 // just allocate a new one.
10348 Label make_two_character_string, make_flat_ascii_string;
10349 GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
10350 &make_two_character_string);
10351 __ ret(2 * kPointerSize);
10352
10353 __ bind(&make_two_character_string);
10354 __ Set(ebx, Immediate(2));
10355 __ jmp(&make_flat_ascii_string);
10356
10357 __ bind(&longer_than_two);
10358 // Check if resulting string will be flat.
10359 __ cmp(ebx, String::kMinNonFlatLength);
10360 __ j(below, &string_add_flat_result);
10361 // Handle exceptionally long strings in the runtime system.
10362 ASSERT((String::kMaxLength & 0x80000000) == 0);
10363 __ cmp(ebx, String::kMaxLength);
10364 __ j(above, &string_add_runtime);
10365
10366 // If result is not supposed to be flat allocate a cons string object. If both
10367 // strings are ascii the result is an ascii cons string.
10368 Label non_ascii, allocated;
10369 __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
10370 __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
10371 __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
10372 __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
10373 __ and_(ecx, Operand(edi));
10374 ASSERT(kStringEncodingMask == kAsciiStringTag);
10375 __ test(ecx, Immediate(kAsciiStringTag));
10376 __ j(zero, &non_ascii);
10377 // Allocate an ascii cons string.
10378 __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
10379 __ bind(&allocated);
10380 // Fill the fields of the cons string.
10381 __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
10382 __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
10383 Immediate(String::kEmptyHashField));
10384 __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
10385 __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
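  // A cons string only records its two halves; the characters are not copied
  // until the string is flattened later.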
10386 __ mov(eax, ecx);
10387 __ IncrementCounter(&Counters::string_add_native, 1);
10388 __ ret(2 * kPointerSize);
10389 __ bind(&non_ascii);
10390 // Allocate a two byte cons string.
10391 __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
10392 __ jmp(&allocated);
10393
10394 // Handle creating a flat result. First check that both strings are not
10395 // external strings.
10396 // eax: first string
10397 // ebx: length of resulting flat string
10398 // edx: second string
10399 __ bind(&string_add_flat_result);
10400 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
10401 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
10402 __ and_(ecx, kStringRepresentationMask);
10403 __ cmp(ecx, kExternalStringTag);
10404 __ j(equal, &string_add_runtime);
10405 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
10406 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
10407 __ and_(ecx, kStringRepresentationMask);
10408 __ cmp(ecx, kExternalStringTag);
10409 __ j(equal, &string_add_runtime);
10410 // Now check if both strings are ascii strings.
10411 // eax: first string
10412 // ebx: length of resulting flat string
10413 // edx: second string
10414 Label non_ascii_string_add_flat_result;
10415 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
10416 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
10417 ASSERT(kStringEncodingMask == kAsciiStringTag);
10418 __ test(ecx, Immediate(kAsciiStringTag));
10419 __ j(zero, &non_ascii_string_add_flat_result);
10420 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
10421 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
10422 __ test(ecx, Immediate(kAsciiStringTag));
10423 __ j(zero, &string_add_runtime);
10424
10425 __ bind(&make_flat_ascii_string);
10426 // Both strings are ascii strings. As they are short they are both flat.
10427 // ebx: length of resulting flat string
10428 __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
10429 // eax: result string
10430 __ mov(ecx, eax);
10431 // Locate first character of result.
10432 __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10433 // Load first argument and locate first character.
10434 __ mov(edx, Operand(esp, 2 * kPointerSize));
10435 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
10436 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10437 // eax: result string
10438 // ecx: first character of result
10439 // edx: first char of first argument
10440 // edi: length of first argument
10441 GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
10442 // Load second argument and locate first character.
10443 __ mov(edx, Operand(esp, 1 * kPointerSize));
10444 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
10445 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10446 // eax: result string
10447 // ecx: next character of result
10448 // edx: first char of second argument
10449 // edi: length of second argument
10450 GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
10451 __ IncrementCounter(&Counters::string_add_native, 1);
10452 __ ret(2 * kPointerSize);
10453
10454 // Handle creating a flat two byte result.
10455 // eax: first string - known to be two byte
10456 // ebx: length of resulting flat string
10457 // edx: second string
10458 __ bind(&non_ascii_string_add_flat_result);
10459 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
10460 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
10461 __ and_(ecx, kAsciiStringTag);
10462 __ j(not_zero, &string_add_runtime);
10463 // Both strings are two byte strings. As they are short they are both
10464 // flat.
10465 __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
10466 // eax: result string
10467 __ mov(ecx, eax);
10468 // Locate first character of result.
10469 __ add(Operand(ecx),
10470 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10471 // Load first argument and locate first character.
10472 __ mov(edx, Operand(esp, 2 * kPointerSize));
10473 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
10474 __ add(Operand(edx),
10475 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10476 // eax: result string
10477 // ecx: first character of result
10478 // edx: first char of first argument
10479 // edi: length of first argument
10480 GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
10481 // Load second argument and locate first character.
10482 __ mov(edx, Operand(esp, 1 * kPointerSize));
10483 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
10484 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10485 // eax: result string
10486 // ecx: next character of result
10487 // edx: first char of second argument
10488 // edi: length of second argument
10489 GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
10490 __ IncrementCounter(&Counters::string_add_native, 1);
10491 __ ret(2 * kPointerSize);
10492
10493 // Just jump to runtime to add the two strings.
10494 __ bind(&string_add_runtime);
10495 __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
10496 }
10497
10498
10499 void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
10500 Register dest,
10501 Register src,
10502 Register count,
10503 Register scratch,
10504 bool ascii) {
10505 Label loop;
10506 __ bind(&loop);
10507 // This loop just copies one character at a time, as it is only used for very
10508 // short strings.
10509 if (ascii) {
10510 __ mov_b(scratch, Operand(src, 0));
10511 __ mov_b(Operand(dest, 0), scratch);
10512 __ add(Operand(src), Immediate(1));
10513 __ add(Operand(dest), Immediate(1));
10514 } else {
10515 __ mov_w(scratch, Operand(src, 0));
10516 __ mov_w(Operand(dest, 0), scratch);
10517 __ add(Operand(src), Immediate(2));
10518 __ add(Operand(dest), Immediate(2));
10519 }
10520 __ sub(Operand(count), Immediate(1));
10521 __ j(not_zero, &loop);
10522 }
10523
10524
10525 void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
10526 Register dest,
10527 Register src,
10528 Register count,
10529 Register scratch,
10530 bool ascii) {
10531 // Copy characters using rep movs of doublewords. Align destination on 4 byte
10532 // boundary before starting rep movs. Copy remaining characters after running
10533 // rep movs.
10534 ASSERT(dest.is(edi)); // rep movs destination
10535 ASSERT(src.is(esi)); // rep movs source
10536 ASSERT(count.is(ecx)); // rep movs count
10537 ASSERT(!scratch.is(dest));
10538 ASSERT(!scratch.is(src));
10539 ASSERT(!scratch.is(count));
10540
10541 // Nothing to do for zero characters.
10542 Label done;
10543 __ test(count, Operand(count));
10544 __ j(zero, &done);
10545
10546 // Make count the number of bytes to copy.
10547 if (!ascii) {
10548 __ shl(count, 1);
10549 }
10550
10551 // Don't enter the rep movs if there are fewer than 4 bytes to copy.
10552 Label last_bytes;
10553 __ test(count, Immediate(~3));
10554 __ j(zero, &last_bytes);
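  // Testing against ~3 checks whether any bits above the low two are set,
  // i.e. whether at least four bytes remain for the rep movs below.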
10555
10556 // Copy from esi to edi using the rep movs instruction.
10557 __ mov(scratch, count);
10558 __ sar(count, 2); // Number of doublewords to copy.
10559 __ rep_movs();
10560
10561 // Find number of bytes left.
10562 __ mov(count, scratch);
10563 __ and_(count, 3);
10564
10565 // Check if there are more bytes to copy.
10566 __ bind(&last_bytes);
10567 __ test(count, Operand(count));
10568 __ j(zero, &done);
10569
10570 // Copy remaining characters.
10571 Label loop;
10572 __ bind(&loop);
10573 __ mov_b(scratch, Operand(src, 0));
10574 __ mov_b(Operand(dest, 0), scratch);
10575 __ add(Operand(src), Immediate(1));
10576 __ add(Operand(dest), Immediate(1));
10577 __ sub(Operand(count), Immediate(1));
10578 __ j(not_zero, &loop);
10579
10580 __ bind(&done);
10581 }
10582
10583
10584 void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
10585 Register c1,
10586 Register c2,
10587 Register scratch1,
10588 Register scratch2,
10589 Register scratch3,
10590 Label* not_found) {
10591 // Register scratch3 is the general scratch register in this function.
10592 Register scratch = scratch3;
10593
10594 // Make sure that both characters are not digits, as such strings have a
10595 // different hash algorithm. Don't try to look for these in the symbol table.
10596 Label not_array_index;
10597 __ mov(scratch, c1);
10598 __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
10599 __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
10600 __ j(above, &not_array_index);
10601 __ mov(scratch, c2);
10602 __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
10603 __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
10604 __ j(below_equal, not_found);
10605
10606 __ bind(&not_array_index);
10607 // Calculate the two character string hash.
10608 Register hash = scratch1;
10609 GenerateHashInit(masm, hash, c1, scratch);
10610 GenerateHashAddCharacter(masm, hash, c2, scratch);
10611 GenerateHashGetHash(masm, hash, scratch);
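  // The hash computed by these helpers has to match the hash under which
  // symbols were inserted into the symbol table, or the probes below could
  // not find an existing two-character symbol.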
10612
10613 // Collect the two characters in a register.
10614 Register chars = c1;
10615 __ shl(c2, kBitsPerByte);
10616 __ or_(chars, Operand(c2));
10617
10618 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10619 // hash: hash of two character string.
10620
10621 // Load the symbol table.
10622 Register symbol_table = c2;
10623 ExternalReference roots_address = ExternalReference::roots_address();
10624 __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
10625 __ mov(symbol_table,
10626 Operand::StaticArray(scratch, times_pointer_size, roots_address));
10627
10628 // Calculate capacity mask from the symbol table capacity.
10629 Register mask = scratch2;
10630 static const int kCapacityOffset =
10631 FixedArray::kHeaderSize +
10632 SymbolTable::kCapacityIndex * kPointerSize;
10633 __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
10634 __ SmiUntag(mask);
10635 __ sub(Operand(mask), Immediate(1));
10636
10637 // Registers
10638 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10639 // hash: hash of two character string
10640 // symbol_table: symbol table
10641 // mask: capacity mask
10642 // scratch: -
10643
10644 // Perform a number of probes in the symbol table.
10645 static const int kProbes = 4;
10646 Label found_in_symbol_table;
10647 Label next_probe[kProbes], next_probe_pop_mask[kProbes];
10648 for (int i = 0; i < kProbes; i++) {
10649 // Calculate entry in symbol table.
10650 __ mov(scratch, hash);
10651 if (i > 0) {
10652 __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
10653 }
10654 __ and_(scratch, Operand(mask));
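  // Each probe adds a different precomputed offset to the hash before masking
  // with the capacity, so up to kProbes distinct table slots are examined.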
10655
10656 // Load the entry from the symbol table.
10657 Register candidate = scratch; // Scratch register contains candidate.
10658 ASSERT_EQ(1, SymbolTableShape::kEntrySize);
10659 static const int kFirstElementOffset =
10660 FixedArray::kHeaderSize +
10661 SymbolTable::kPrefixStartIndex * kPointerSize +
10662 SymbolTableShape::kPrefixSize * kPointerSize;
10663 __ mov(candidate,
10664 FieldOperand(symbol_table,
10665 scratch,
10666 times_pointer_size,
10667 kFirstElementOffset));
10668
10669 // If entry is undefined no string with this hash can be found.
10670 __ cmp(candidate, Factory::undefined_value());
10671 __ j(equal, not_found);
10672
10673 // If length is not 2 the string is not a candidate.
10674 __ cmp(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
10675 __ j(not_equal, &next_probe[i]);
10676
10677 // As we are out of registers save the mask on the stack and use that
10678 // register as a temporary.
10679 __ push(mask);
10680 Register temp = mask;
10681
10682 // Check that the candidate is a non-external ascii string.
10683 __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
10684 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
10685 __ JumpIfInstanceTypeIsNotSequentialAscii(
10686 temp, temp, &next_probe_pop_mask[i]);
10687
10688 // Check if the two characters match.
10689 __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
10690 __ and_(temp, 0x0000ffff);
10691 __ cmp(chars, Operand(temp));
10692 __ j(equal, &found_in_symbol_table);
10693 __ bind(&next_probe_pop_mask[i]);
10694 __ pop(mask);
10695 __ bind(&next_probe[i]);
10696 }
10697
10698 // No matching 2 character string found by probing.
10699 __ jmp(not_found);
10700
10701 // Scratch register contains result when we fall through to here.
10702 Register result = scratch;
10703 __ bind(&found_in_symbol_table);
10704 __ pop(mask); // Pop temporarily saved mask from the stack.
10705 if (!result.is(eax)) {
10706 __ mov(eax, result);
10707 }
10708 }
10709
10710
10711 void StringStubBase::GenerateHashInit(MacroAssembler* masm,
10712 Register hash,
10713 Register character,
10714 Register scratch) {
10715 // hash = character + (character << 10);
10716 __ mov(hash, character);
10717 __ shl(hash, 10);
10718 __ add(hash, Operand(character));
10719 // hash ^= hash >> 6;
10720 __ mov(scratch, hash);
10721 __ sar(scratch, 6);
10722 __ xor_(hash, Operand(scratch));
10723 }
10724
10725
10726 void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
10727 Register hash,
10728 Register character,
10729 Register scratch) {
10730 // hash += character;
10731 __ add(hash, Operand(character));
10732 // hash += hash << 10;
10733 __ mov(scratch, hash);
10734 __ shl(scratch, 10);
10735 __ add(hash, Operand(scratch));
10736 // hash ^= hash >> 6;
10737 __ mov(scratch, hash);
10738 __ sar(scratch, 6);
10739 __ xor_(hash, Operand(scratch));
10740 }
10741
10742
10743 void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
10744 Register hash,
10745 Register scratch) {
10746 // hash += hash << 3;
10747 __ mov(scratch, hash);
10748 __ shl(scratch, 3);
10749 __ add(hash, Operand(scratch));
10750 // hash ^= hash >> 11;
10751 __ mov(scratch, hash);
10752 __ sar(scratch, 11);
10753 __ xor_(hash, Operand(scratch));
10754 // hash += hash << 15;
10755 __ mov(scratch, hash);
10756 __ shl(scratch, 15);
10757 __ add(hash, Operand(scratch));
10758
10759 // if (hash == 0) hash = 27;
10760 Label hash_not_zero;
10761 __ test(hash, Operand(hash));
10762 __ j(not_zero, &hash_not_zero);
10763 __ mov(hash, Immediate(27));
10764 __ bind(&hash_not_zero);
10765 }
10766
10767
10768 void SubStringStub::Generate(MacroAssembler* masm) {
10769 Label runtime;
10770
10771 // Stack frame on entry.
10772 // esp[0]: return address
10773 // esp[4]: to
10774 // esp[8]: from
10775 // esp[12]: string
10776
10777 // Make sure first argument is a string.
10778 __ mov(eax, Operand(esp, 3 * kPointerSize));
10779 ASSERT_EQ(0, kSmiTag);
10780 __ test(eax, Immediate(kSmiTagMask));
10781 __ j(zero, &runtime);
10782 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
10783 __ j(NegateCondition(is_string), &runtime);
10784
10785 // eax: string
10786 // ebx: instance type
10787 // Calculate length of sub string using the smi values.
10788 Label result_longer_than_two;
10789 __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
10790 __ test(ecx, Immediate(kSmiTagMask));
10791 __ j(not_zero, &runtime);
10792 __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
10793 __ test(edx, Immediate(kSmiTagMask));
10794 __ j(not_zero, &runtime);
10795 __ sub(ecx, Operand(edx));
10796 // Special handling of sub-strings of length 1 and 2. One character strings
10797 // are handled in the runtime system (looked up in the single character
10798 // cache). Two character strings are looked up in the symbol table.
10799 __ SmiUntag(ecx); // Result length is no longer smi.
10800 __ cmp(ecx, 2);
10801 __ j(greater, &result_longer_than_two);
10802 __ j(less, &runtime);
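  // Lengths smaller than 2 go to the runtime, which uses the single character
  // cache; length 2 is looked up in the symbol table below.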
10803
10804 // Sub string of length 2 requested.
10805 // eax: string
10806 // ebx: instance type
10807 // ecx: sub string length (value is 2)
10808 // edx: from index (smi)
10809 __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
10810
10811 // Get the two characters forming the sub string.
10812 __ SmiUntag(edx); // From index is no longer smi.
10813 __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
10814 __ movzx_b(ecx,
10815 FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
10816
10817 // Try to lookup two character string in symbol table.
10818 Label make_two_character_string;
10819 GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
10820 &make_two_character_string);
10821 __ ret(2 * kPointerSize);
10822
10823 __ bind(&make_two_character_string);
10824 // Setup registers for allocating the two character string.
10825 __ mov(eax, Operand(esp, 3 * kPointerSize));
10826 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
10827 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
10828 __ Set(ecx, Immediate(2));

  __ bind(&result_longer_than_two);
  // eax: string
  // ebx: instance type
  // ecx: result string length
  // Check for a flat ascii string.
  Label non_ascii_flat;
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);

  // Allocate the result.
  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  __ SmiUntag(ebx);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
  __ mov(esi, edx);  // Restore esi.
  __ IncrementCounter(&Counters::sub_string_native, 1);
  __ ret(3 * kPointerSize);

  __ bind(&non_ascii_flat);
  // eax: string
  // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
  // ecx: result string length
  // Check for a flat two byte string.
  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
  __ j(not_equal, &runtime);

  // Allocate the result.
  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  // As from is a smi, it is already twice the untagged index, which matches
  // the size of a two byte character.
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
  __ add(esi, Operand(ebx));
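  // Worked example (illustrative): a from index of 3 is stored as the smi
  // 3 << 1 == 6, and character 3 of a sequential two byte string begins
  // 3 * 2 == 6 bytes into the character data, so the smi can be added to
  // esi without untagging it first.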

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
  __ mov(esi, edx);  // Restore esi.
  __ IncrementCounter(&Counters::sub_string_native, 1);
  __ ret(3 * kPointerSize);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
}
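
// For reference, a rough sketch of the dispatch implemented by the stub above
// (illustrative pseudocode, not part of V8; only flat sequential strings are
// handled on the fast paths):
//
//   length = to - from;
//   if (length < 2)                        -> runtime (single character cache)
//   else if (length == 2 && flat ascii)    -> probe two character symbol table
//   else if (length > 2 && flat ascii)     -> allocate + copy ascii characters
//   else if (length > 2 && flat two byte)  -> allocate + copy two byte characters
//   else                                   -> runtime (Runtime::kSubString)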


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3) {
  Label result_not_equal;
  Label result_greater;
  Label compare_lengths;
  // Find minimum length.
  Label left_shorter;
  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
  __ mov(scratch3, scratch1);
  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));

  Register length_delta = scratch3;

  __ j(less_equal, &left_shorter);
  // Right string is shorter. Change scratch1 to be length of right string.
  __ sub(scratch1, Operand(length_delta));
  __ bind(&left_shorter);

  Register min_length = scratch1;

  // If either length is zero, just compare lengths.
  __ test(min_length, Operand(min_length));
  __ j(zero, &compare_lengths);

  // Change index to run from -min_length to -1 by adding min_length to the
  // string start. This means that the loop ends when the index reaches zero,
  // which doesn't need an additional compare.
  __ lea(left,
         FieldOperand(left,
                      min_length, times_1,
                      SeqAsciiString::kHeaderSize));
  __ lea(right,
         FieldOperand(right,
                      min_length, times_1,
                      SeqAsciiString::kHeaderSize));
  __ neg(min_length);

  Register index = min_length;  // index = -min_length;

  {
    // Compare loop.
    Label loop;
    __ bind(&loop);
    // Compare characters.
    __ mov_b(scratch2, Operand(left, index, times_1, 0));
    __ cmpb(scratch2, Operand(right, index, times_1, 0));
    __ j(not_equal, &result_not_equal);
    __ add(Operand(index), Immediate(1));
    __ j(not_zero, &loop);
  }
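
  // For reference, a minimal C++ sketch of the loop above (illustrative; the
  // generated code relies on the flags set by the add to detect the index
  // reaching zero, so no separate compare instruction is needed):
  //
  //   const char* left_end = left_chars + min_length;
  //   const char* right_end = right_chars + min_length;
  //   for (int index = -min_length; index != 0; index++) {
  //     if (left_end[index] != right_end[index]) goto result_not_equal;
  //   }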

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  __ test(length_delta, Operand(length_delta));
  __ j(not_zero, &result_not_equal);

  // Result is EQUAL.
  ASSERT_EQ(0, EQUAL);
  ASSERT_EQ(0, kSmiTag);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(2 * kPointerSize);

  __ bind(&result_not_equal);
  __ j(greater, &result_greater);

  // Result is LESS.
  __ Set(eax, Immediate(Smi::FromInt(LESS)));
  __ ret(2 * kPointerSize);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
  __ ret(2 * kPointerSize);
}
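
// Summarized as a C++ sketch (illustrative only; the generated code returns
// the result as a smi in eax and pops the two string arguments):
//
//   static int CompareFlatAsciiSketch(const char* left, int left_length,
//                                     const char* right, int right_length) {
//     int min_length = left_length < right_length ? left_length : right_length;
//     for (int i = 0; i < min_length; i++) {
//       if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
//     }
//     if (left_length == right_length) return EQUAL;
//     return left_length < right_length ? LESS : GREATER;
//   }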


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  // esp[0]: return address
  // esp[4]: right string
  // esp[8]: left string

  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right

  Label not_same;
  __ cmp(edx, Operand(eax));
  __ j(not_equal, &not_same);
  ASSERT_EQ(0, EQUAL);
  ASSERT_EQ(0, kSmiTag);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(&Counters::string_compare_native, 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat ascii strings.
  __ IncrementCounter(&Counters::string_compare_native, 1);
  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
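
  // GenerateCompareFlatAsciiStrings always ends in a return instruction, so
  // execution never falls through to the runtime call below; the label is
  // only reached via the jump emitted by JumpIfNotBothSequentialAsciiStrings
  // above.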

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
}

#undef __

} }  // namespace v8::internal
