1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #if V8_TARGET_ARCH_X87
8
9 #include "src/x87/lithium-codegen-x87.h"
10 #include "src/ic.h"
11 #include "src/code-stubs.h"
12 #include "src/deoptimizer.h"
13 #include "src/stub-cache.h"
14 #include "src/codegen.h"
15 #include "src/hydrogen-osr.h"
16
17 namespace v8 {
18 namespace internal {
19
20
21 // When invoking builtins, we need to record the safepoint in the middle of
22 // the invoke instruction sequence generated by the macro assembler.
23 class SafepointGenerator V8_FINAL : public CallWrapper {
24 public:
25 SafepointGenerator(LCodeGen* codegen,
26 LPointerMap* pointers,
27 Safepoint::DeoptMode mode)
28 : codegen_(codegen),
29 pointers_(pointers),
30 deopt_mode_(mode) {}
31 virtual ~SafepointGenerator() {}
32
33 virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
34
35 virtual void AfterCall() const V8_OVERRIDE {
36 codegen_->RecordSafepoint(pointers_, deopt_mode_);
37 }
38
39 private:
40 LCodeGen* codegen_;
41 LPointerMap* pointers_;
42 Safepoint::DeoptMode deopt_mode_;
43 };
44
45
46 #define __ masm()->
47
48 bool LCodeGen::GenerateCode() {
49 LPhase phase("Z_Code generation", chunk());
50 ASSERT(is_unused());
51 status_ = GENERATING;
52
53 // Open a frame scope to indicate that there is a frame on the stack. The
54 // MANUAL indicates that the scope shouldn't actually generate code to set up
55 // the frame (that is done in GeneratePrologue).
56 FrameScope frame_scope(masm_, StackFrame::MANUAL);
57
58 support_aligned_spilled_doubles_ = info()->IsOptimizing();
59
60 dynamic_frame_alignment_ = info()->IsOptimizing() &&
61 ((chunk()->num_double_slots() > 2 &&
62 !chunk()->graph()->is_recursive()) ||
63 !info()->osr_ast_id().IsNone());
64
65 return GeneratePrologue() &&
66 GenerateBody() &&
67 GenerateDeferredCode() &&
68 GenerateJumpTable() &&
69 GenerateSafepointTable();
70 }
71
72
73 void LCodeGen::FinishCode(Handle<Code> code) {
74 ASSERT(is_done());
75 code->set_stack_slots(GetStackSlotCount());
76 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
77 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
78 PopulateDeoptimizationData(code);
79 if (!info()->IsStub()) {
80 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
81 }
82 }
83
84
85 #ifdef _MSC_VER
86 void LCodeGen::MakeSureStackPagesMapped(int offset) {
87 const int kPageSize = 4 * KB;
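// Windows grows the stack one guard page at a time, so after a large esp
// adjustment we touch the newly reserved area one word per 4 KB page, from
// the old stack pointer down towards the new one, so that every page gets
// committed in order.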
88 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
89 __ mov(Operand(esp, offset), eax);
90 }
91 }
92 #endif
93
94
95 bool LCodeGen::GeneratePrologue() {
96 ASSERT(is_generating());
97
98 if (info()->IsOptimizing()) {
99 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
100
101 #ifdef DEBUG
102 if (strlen(FLAG_stop_at) > 0 &&
103 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
104 __ int3();
105 }
106 #endif
107
108 // Sloppy mode functions and builtins need to replace the receiver with the
109 // global proxy when called as functions (without an explicit receiver
110 // object).
111 if (info_->this_has_uses() &&
112 info_->strict_mode() == SLOPPY &&
113 !info_->is_native()) {
114 Label ok;
115 // +1 for return address.
116 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
117 __ mov(ecx, Operand(esp, receiver_offset));
118
119 __ cmp(ecx, isolate()->factory()->undefined_value());
120 __ j(not_equal, &ok, Label::kNear);
121
122 __ mov(ecx, GlobalObjectOperand());
123 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
124
125 __ mov(Operand(esp, receiver_offset), ecx);
126
127 __ bind(&ok);
128 }
129
130 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
131 // Move state of dynamic frame alignment into edx.
132 __ Move(edx, Immediate(kNoAlignmentPadding));
133
134 Label do_not_pad, align_loop;
135 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
136 // Align esp + 4 to a multiple of 2 * kPointerSize.
137 __ test(esp, Immediate(kPointerSize));
138 __ j(not_zero, &do_not_pad, Label::kNear);
139 __ push(Immediate(0));
140 __ mov(ebx, esp);
141 __ mov(edx, Immediate(kAlignmentPaddingPushed));
142 // Copy arguments, receiver, and return address.
143 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
144
145 __ bind(&align_loop);
146 __ mov(eax, Operand(ebx, 1 * kPointerSize));
147 __ mov(Operand(ebx, 0), eax);
148 __ add(Operand(ebx), Immediate(kPointerSize));
149 __ dec(ecx);
150 __ j(not_zero, &align_loop, Label::kNear);
151 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
152 __ bind(&do_not_pad);
153 }
154 }
155
156 info()->set_prologue_offset(masm_->pc_offset());
157 if (NeedsEagerFrame()) {
158 ASSERT(!frame_is_built_);
159 frame_is_built_ = true;
160 if (info()->IsStub()) {
161 __ StubPrologue();
162 } else {
163 __ Prologue(info()->IsCodePreAgingActive());
164 }
165 info()->AddNoFrameRange(0, masm_->pc_offset());
166 }
167
168 if (info()->IsOptimizing() &&
169 dynamic_frame_alignment_ &&
170 FLAG_debug_code) {
171 __ test(esp, Immediate(kPointerSize));
172 __ Assert(zero, kFrameIsExpectedToBeAligned);
173 }
174
175 // Reserve space for the stack slots needed by the code.
176 int slots = GetStackSlotCount();
177 ASSERT(slots != 0 || !info()->IsOptimizing());
178 if (slots > 0) {
179 if (slots == 1) {
180 if (dynamic_frame_alignment_) {
181 __ push(edx);
182 } else {
183 __ push(Immediate(kNoAlignmentPadding));
184 }
185 } else {
186 if (FLAG_debug_code) {
187 __ sub(Operand(esp), Immediate(slots * kPointerSize));
188 #ifdef _MSC_VER
189 MakeSureStackPagesMapped(slots * kPointerSize);
190 #endif
191 __ push(eax);
192 __ mov(Operand(eax), Immediate(slots));
193 Label loop;
194 __ bind(&loop);
195 __ mov(MemOperand(esp, eax, times_4, 0),
196 Immediate(kSlotsZapValue));
197 __ dec(eax);
198 __ j(not_zero, &loop);
199 __ pop(eax);
200 } else {
201 __ sub(Operand(esp), Immediate(slots * kPointerSize));
202 #ifdef _MSC_VER
203 MakeSureStackPagesMapped(slots * kPointerSize);
204 #endif
205 }
206
207 if (support_aligned_spilled_doubles_) {
208 Comment(";;; Store dynamic frame alignment tag for spilled doubles");
209 // Store dynamic frame alignment state in the first local.
210 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
211 if (dynamic_frame_alignment_) {
212 __ mov(Operand(ebp, offset), edx);
213 } else {
214 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
215 }
216 }
217 }
218 }
219
220 // Possibly allocate a local context.
221 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
222 if (heap_slots > 0) {
223 Comment(";;; Allocate local context");
224 bool need_write_barrier = true;
225 // Argument to NewContext is the function, which is still in edi.
226 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
227 FastNewContextStub stub(isolate(), heap_slots);
228 __ CallStub(&stub);
229 // Result of FastNewContextStub is always in new space.
230 need_write_barrier = false;
231 } else {
232 __ push(edi);
233 __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
234 }
235 RecordSafepoint(Safepoint::kNoLazyDeopt);
236 // Context is returned in eax. It replaces the context passed to us.
237 // It's saved in the stack and kept live in esi.
238 __ mov(esi, eax);
239 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
240
241 // Copy parameters into context if necessary.
242 int num_parameters = scope()->num_parameters();
243 for (int i = 0; i < num_parameters; i++) {
244 Variable* var = scope()->parameter(i);
245 if (var->IsContextSlot()) {
246 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
247 (num_parameters - 1 - i) * kPointerSize;
248 // Load parameter from stack.
249 __ mov(eax, Operand(ebp, parameter_offset));
250 // Store it in the context.
251 int context_offset = Context::SlotOffset(var->index());
252 __ mov(Operand(esi, context_offset), eax);
253 // Update the write barrier. This clobbers eax and ebx.
254 if (need_write_barrier) {
255 __ RecordWriteContextSlot(esi,
256 context_offset,
257 eax,
258 ebx);
259 } else if (FLAG_debug_code) {
260 Label done;
261 __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
262 __ Abort(kExpectedNewSpaceObject);
263 __ bind(&done);
264 }
265 }
266 }
267 Comment(";;; End allocate local context");
268 }
269
270 // Trace the call.
271 if (FLAG_trace && info()->IsOptimizing()) {
272 // We have not executed any compiled code yet, so esi still holds the
273 // incoming context.
274 __ CallRuntime(Runtime::kTraceEnter, 0);
275 }
276 return !is_aborted();
277 }
278
279
280 void LCodeGen::GenerateOsrPrologue() {
281 // Generate the OSR entry prologue at the first unknown OSR value, or if there
282 // are none, at the OSR entrypoint instruction.
283 if (osr_pc_offset_ >= 0) return;
284
285 osr_pc_offset_ = masm()->pc_offset();
286
287 // Move state of dynamic frame alignment into edx.
288 __ Move(edx, Immediate(kNoAlignmentPadding));
289
290 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
291 Label do_not_pad, align_loop;
292 // Align ebp + 4 to a multiple of 2 * kPointerSize.
293 __ test(ebp, Immediate(kPointerSize));
294 __ j(zero, &do_not_pad, Label::kNear);
295 __ push(Immediate(0));
296 __ mov(ebx, esp);
297 __ mov(edx, Immediate(kAlignmentPaddingPushed));
298
299 // Move all parts of the frame over one word. The frame consists of:
300 // unoptimized frame slots, alignment state, context, frame pointer, return
301 // address, receiver, and the arguments.
302 __ mov(ecx, Immediate(scope()->num_parameters() +
303 5 + graph()->osr()->UnoptimizedFrameSlots()));
304
305 __ bind(&align_loop);
306 __ mov(eax, Operand(ebx, 1 * kPointerSize));
307 __ mov(Operand(ebx, 0), eax);
308 __ add(Operand(ebx), Immediate(kPointerSize));
309 __ dec(ecx);
310 __ j(not_zero, &align_loop, Label::kNear);
311 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
312 __ sub(Operand(ebp), Immediate(kPointerSize));
313 __ bind(&do_not_pad);
314 }
315
316 // Save the first local, which is overwritten by the alignment state.
317 Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
318 __ push(alignment_loc);
319
320 // Set the dynamic frame alignment state.
321 __ mov(alignment_loc, edx);
322
323 // Adjust the frame size, subsuming the unoptimized frame into the
324 // optimized frame.
325 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
326 ASSERT(slots >= 1);
327 __ sub(esp, Immediate((slots - 1) * kPointerSize));
328 }
329
330
331 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
332 if (instr->IsCall()) {
333 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
334 }
335 if (!instr->IsLazyBailout() && !instr->IsGap()) {
336 safepoints_.BumpLastLazySafepointIndex();
337 }
338 FlushX87StackIfNecessary(instr);
339 }
340
341
342 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
343 if (instr->IsGoto()) {
344 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
345 } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
346 !instr->IsGap() && !instr->IsReturn()) {
347 if (instr->ClobbersDoubleRegisters(isolate())) {
348 if (instr->HasDoubleRegisterResult()) {
349 ASSERT_EQ(1, x87_stack_.depth());
350 } else {
351 ASSERT_EQ(0, x87_stack_.depth());
352 }
353 }
354 __ VerifyX87StackDepth(x87_stack_.depth());
355 }
356 }
357
358
359 bool LCodeGen::GenerateJumpTable() {
360 Label needs_frame;
361 if (jump_table_.length() > 0) {
362 Comment(";;; -------------------- Jump table --------------------");
363 }
364 for (int i = 0; i < jump_table_.length(); i++) {
365 __ bind(&jump_table_[i].label);
366 Address entry = jump_table_[i].address;
367 Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
368 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
369 if (id == Deoptimizer::kNotDeoptimizationEntry) {
370 Comment(";;; jump table entry %d.", i);
371 } else {
372 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
373 }
374 if (jump_table_[i].needs_frame) {
375 ASSERT(!info()->saves_caller_doubles());
376 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
377 if (needs_frame.is_bound()) {
378 __ jmp(&needs_frame);
379 } else {
380 __ bind(&needs_frame);
381 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
382 // This variant of deopt can only be used with stubs. Since we don't
383 // have a function pointer to install in the stack frame that we're
384 // building, install a special marker there instead.
385 ASSERT(info()->IsStub());
386 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
387 // Push a PC inside the function so that the deopt code can find where
388 // the deopt comes from. It doesn't have to be the precise return
389 // address of a "calling" LAZY deopt, it only has to be somewhere
390 // inside the code body.
391 Label push_approx_pc;
392 __ call(&push_approx_pc);
393 __ bind(&push_approx_pc);
394 // Push the continuation which was stashed where the ebp should
395 // be. Replace it with the saved ebp.
396 __ push(MemOperand(esp, 3 * kPointerSize));
397 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
398 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
399 __ ret(0); // Call the continuation without clobbering registers.
400 }
401 } else {
402 __ call(entry, RelocInfo::RUNTIME_ENTRY);
403 }
404 }
405 return !is_aborted();
406 }
407
408
409 bool LCodeGen::GenerateDeferredCode() {
410 ASSERT(is_generating());
411 if (deferred_.length() > 0) {
412 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
413 LDeferredCode* code = deferred_[i];
414 X87Stack copy(code->x87_stack());
415 x87_stack_ = copy;
416
417 HValue* value =
418 instructions_->at(code->instruction_index())->hydrogen_value();
419 RecordAndWritePosition(
420 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
421
422 Comment(";;; <@%d,#%d> "
423 "-------------------- Deferred %s --------------------",
424 code->instruction_index(),
425 code->instr()->hydrogen_value()->id(),
426 code->instr()->Mnemonic());
427 __ bind(code->entry());
428 if (NeedsDeferredFrame()) {
429 Comment(";;; Build frame");
430 ASSERT(!frame_is_built_);
431 ASSERT(info()->IsStub());
432 frame_is_built_ = true;
433 // Build the frame in such a way that esi isn't trashed.
434 __ push(ebp); // Caller's frame pointer.
435 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
436 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
437 __ lea(ebp, Operand(esp, 2 * kPointerSize));
438 Comment(";;; Deferred code");
439 }
440 code->Generate();
441 if (NeedsDeferredFrame()) {
442 __ bind(code->done());
443 Comment(";;; Destroy frame");
444 ASSERT(frame_is_built_);
445 frame_is_built_ = false;
446 __ mov(esp, ebp);
447 __ pop(ebp);
448 }
449 __ jmp(code->exit());
450 }
451 }
452
453 // Deferred code is the last part of the instruction sequence. Mark
454 // the generated code as done unless we bailed out.
455 if (!is_aborted()) status_ = DONE;
456 return !is_aborted();
457 }
458
459
460 bool LCodeGen::GenerateSafepointTable() {
461 ASSERT(is_done());
462 if (!info()->IsStub()) {
463 // For lazy deoptimization we need space to patch a call after every call.
464 // Ensure there is always space for such patching, even if the code ends
465 // in a call.
466 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
467 while (masm()->pc_offset() < target_offset) {
468 masm()->nop();
469 }
470 }
471 safepoints_.Emit(masm(), GetStackSlotCount());
472 return !is_aborted();
473 }
474
475
476 Register LCodeGen::ToRegister(int index) const {
477 return Register::FromAllocationIndex(index);
478 }
479
480
481 X87Register LCodeGen::ToX87Register(int index) const {
482 return X87Register::FromAllocationIndex(index);
483 }
484
485
486 void LCodeGen::X87LoadForUsage(X87Register reg) {
487 ASSERT(x87_stack_.Contains(reg));
488 x87_stack_.Fxch(reg);
489 x87_stack_.pop();
490 }
491
492
493 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
494 ASSERT(x87_stack_.Contains(reg1));
495 ASSERT(x87_stack_.Contains(reg2));
496 x87_stack_.Fxch(reg1, 1);
497 x87_stack_.Fxch(reg2);
498 x87_stack_.pop();
499 x87_stack_.pop();
500 }
501
502
503 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
504 ASSERT(is_mutable_);
505 ASSERT(Contains(reg) && stack_depth_ > other_slot);
506 int i = ArrayIndex(reg);
507 int st = st2idx(i);
508 if (st != other_slot) {
509 int other_i = st2idx(other_slot);
510 X87Register other = stack_[other_i];
511 stack_[other_i] = reg;
512 stack_[i] = other;
513 if (st == 0) {
514 __ fxch(other_slot);
515 } else if (other_slot == 0) {
516 __ fxch(st);
517 } else {
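// Neither slot is st(0), so the swap is routed through the top of the x87
// stack with three exchanges; the first and last fxch(st) leave the
// original st(0) value back on top afterwards.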
518 __ fxch(st);
519 __ fxch(other_slot);
520 __ fxch(st);
521 }
522 }
523 }
524
525
526 int LCodeGen::X87Stack::st2idx(int pos) {
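// Translate between x87 stack positions (st(0) is the top) and indices into
// stack_[], where slot 0 is the bottom of the virtual stack.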
527 return stack_depth_ - pos - 1;
528 }
529
530
531 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
532 for (int i = 0; i < stack_depth_; i++) {
533 if (stack_[i].is(reg)) return i;
534 }
535 UNREACHABLE();
536 return -1;
537 }
538
539
540 bool LCodeGen::X87Stack::Contains(X87Register reg) {
541 for (int i = 0; i < stack_depth_; i++) {
542 if (stack_[i].is(reg)) return true;
543 }
544 return false;
545 }
546
547
548 void LCodeGen::X87Stack::Free(X87Register reg) {
549 ASSERT(is_mutable_);
550 ASSERT(Contains(reg));
551 int i = ArrayIndex(reg);
552 int st = st2idx(i);
553 if (st > 0) {
554 // keep track of how fstp(i) changes the order of elements
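// fstp(st) stores st(0) into st(st) and pops, so the value that was on top
// of the physical stack ends up in the slot being freed; mirror that move
// in the virtual stack before popping.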
555 int tos_i = st2idx(0);
556 stack_[i] = stack_[tos_i];
557 }
558 pop();
559 __ fstp(st);
560 }
561
562
563 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
564 if (x87_stack_.Contains(dst)) {
565 x87_stack_.Fxch(dst);
566 __ fstp(0);
567 } else {
568 x87_stack_.push(dst);
569 }
570 X87Fld(src, opts);
571 }
572
573
574 void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
575 ASSERT(!src.is_reg_only());
576 switch (opts) {
577 case kX87DoubleOperand:
578 __ fld_d(src);
579 break;
580 case kX87FloatOperand:
581 __ fld_s(src);
582 break;
583 case kX87IntOperand:
584 __ fild_s(src);
585 break;
586 default:
587 UNREACHABLE();
588 }
589 }
590
591
592 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
593 ASSERT(!dst.is_reg_only());
594 x87_stack_.Fxch(src);
595 switch (opts) {
596 case kX87DoubleOperand:
597 __ fst_d(dst);
598 break;
599 case kX87IntOperand:
600 __ fist_s(dst);
601 break;
602 default:
603 UNREACHABLE();
604 }
605 }
606
607
608 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
609 ASSERT(is_mutable_);
610 if (Contains(reg)) {
611 Free(reg);
612 }
613 // Mark this register as the next register to write to
614 stack_[stack_depth_] = reg;
615 }
616
617
618 void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
619 ASSERT(is_mutable_);
620 // Assert the reg is prepared to write, but not on the virtual stack yet
621 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
622 stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
623 stack_depth_++;
624 }
625
626
627 void LCodeGen::X87PrepareBinaryOp(
628 X87Register left, X87Register right, X87Register result) {
629 // You need to use DefineSameAsFirst for x87 instructions
630 ASSERT(result.is(left));
631 x87_stack_.Fxch(right, 1);
632 x87_stack_.Fxch(left);
633 }
634
635
636 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
637 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
638 bool double_inputs = instr->HasDoubleRegisterInput();
639
640 // Flush stack from tos down, since FreeX87() will mess with tos
641 for (int i = stack_depth_-1; i >= 0; i--) {
642 X87Register reg = stack_[i];
643 // Skip registers which contain the inputs for the next instruction
644 // when flushing the stack
645 if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
646 continue;
647 }
648 Free(reg);
649 if (i < stack_depth_-1) i++;
650 }
651 }
652 if (instr->IsReturn()) {
653 while (stack_depth_ > 0) {
654 __ fstp(0);
655 stack_depth_--;
656 }
657 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
658 }
659 }
660
661
662 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
663 ASSERT(stack_depth_ <= 1);
664 // If ever used for new stubs producing two pairs of doubles joined into two
665 // phis this assert hits. That situation is not handled, since the two stacks
666 // might have st0 and st1 swapped.
667 if (current_block_id + 1 != goto_instr->block_id()) {
668 // If we have a value on the x87 stack on leaving a block, it must be a
669 // phi input. If the next block we compile is not the join block, we have
670 // to discard the stack state.
671 stack_depth_ = 0;
672 }
673 }
674
675
676 void LCodeGen::EmitFlushX87ForDeopt() {
677 // The deoptimizer does not support X87 Registers. But as long as we
678 // deopt from a stub it's not a problem, since we will re-materialize the
679 // original stub inputs, which can't be double registers.
680 ASSERT(info()->IsStub());
681 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
682 __ pushfd();
683 __ VerifyX87StackDepth(x87_stack_.depth());
684 __ popfd();
685 }
686 for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
687 }
688
689
690 Register LCodeGen::ToRegister(LOperand* op) const {
691 ASSERT(op->IsRegister());
692 return ToRegister(op->index());
693 }
694
695
696 X87Register LCodeGen::ToX87Register(LOperand* op) const {
697 ASSERT(op->IsDoubleRegister());
698 return ToX87Register(op->index());
699 }
700
701
702 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
703 return ToRepresentation(op, Representation::Integer32());
704 }
705
706
707 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
708 const Representation& r) const {
709 HConstant* constant = chunk_->LookupConstant(op);
710 int32_t value = constant->Integer32Value();
711 if (r.IsInteger32()) return value;
712 ASSERT(r.IsSmiOrTagged());
713 return reinterpret_cast<int32_t>(Smi::FromInt(value));
714 }
715
716
717 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
718 HConstant* constant = chunk_->LookupConstant(op);
719 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
720 return constant->handle(isolate());
721 }
722
723
724 double LCodeGen::ToDouble(LConstantOperand* op) const {
725 HConstant* constant = chunk_->LookupConstant(op);
726 ASSERT(constant->HasDoubleValue());
727 return constant->DoubleValue();
728 }
729
730
731 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
732 HConstant* constant = chunk_->LookupConstant(op);
733 ASSERT(constant->HasExternalReferenceValue());
734 return constant->ExternalReferenceValue();
735 }
736
737
738 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
739 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
740 }
741
742
743 bool LCodeGen::IsSmi(LConstantOperand* op) const {
744 return chunk_->LookupLiteralRepresentation(op).IsSmi();
745 }
746
747
748 static int ArgumentsOffsetWithoutFrame(int index) {
749 ASSERT(index < 0);
750 return -(index + 1) * kPointerSize + kPCOnStackSize;
751 }
752
753
754 Operand LCodeGen::ToOperand(LOperand* op) const {
755 if (op->IsRegister()) return Operand(ToRegister(op));
756 ASSERT(!op->IsDoubleRegister());
757 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
758 if (NeedsEagerFrame()) {
759 return Operand(ebp, StackSlotOffset(op->index()));
760 } else {
761 // Retrieve the parameter relative to the stack pointer, since no eager
762 // stack frame has been set up.
763 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
764 }
765 }
766
767
768 Operand LCodeGen::HighOperand(LOperand* op) {
769 ASSERT(op->IsDoubleStackSlot());
770 if (NeedsEagerFrame()) {
771 return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
772 } else {
773 // Retrieve the parameter relative to the stack pointer, since no eager
774 // stack frame has been set up.
775 return Operand(
776 esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
777 }
778 }
779
780
781 void LCodeGen::WriteTranslation(LEnvironment* environment,
782 Translation* translation) {
783 if (environment == NULL) return;
784
785 // The translation includes one command per value in the environment.
786 int translation_size = environment->translation_size();
787 // The output frame height does not include the parameters.
788 int height = translation_size - environment->parameter_count();
789
790 WriteTranslation(environment->outer(), translation);
791 bool has_closure_id = !info()->closure().is_null() &&
792 !info()->closure().is_identical_to(environment->closure());
793 int closure_id = has_closure_id
794 ? DefineDeoptimizationLiteral(environment->closure())
795 : Translation::kSelfLiteralId;
796 switch (environment->frame_type()) {
797 case JS_FUNCTION:
798 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
799 break;
800 case JS_CONSTRUCT:
801 translation->BeginConstructStubFrame(closure_id, translation_size);
802 break;
803 case JS_GETTER:
804 ASSERT(translation_size == 1);
805 ASSERT(height == 0);
806 translation->BeginGetterStubFrame(closure_id);
807 break;
808 case JS_SETTER:
809 ASSERT(translation_size == 2);
810 ASSERT(height == 0);
811 translation->BeginSetterStubFrame(closure_id);
812 break;
813 case ARGUMENTS_ADAPTOR:
814 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
815 break;
816 case STUB:
817 translation->BeginCompiledStubFrame();
818 break;
819 default:
820 UNREACHABLE();
821 }
822
823 int object_index = 0;
824 int dematerialized_index = 0;
825 for (int i = 0; i < translation_size; ++i) {
826 LOperand* value = environment->values()->at(i);
827 AddToTranslation(environment,
828 translation,
829 value,
830 environment->HasTaggedValueAt(i),
831 environment->HasUint32ValueAt(i),
832 &object_index,
833 &dematerialized_index);
834 }
835 }
836
837
838 void LCodeGen::AddToTranslation(LEnvironment* environment,
839 Translation* translation,
840 LOperand* op,
841 bool is_tagged,
842 bool is_uint32,
843 int* object_index_pointer,
844 int* dematerialized_index_pointer) {
845 if (op == LEnvironment::materialization_marker()) {
846 int object_index = (*object_index_pointer)++;
847 if (environment->ObjectIsDuplicateAt(object_index)) {
848 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
849 translation->DuplicateObject(dupe_of);
850 return;
851 }
852 int object_length = environment->ObjectLengthAt(object_index);
853 if (environment->ObjectIsArgumentsAt(object_index)) {
854 translation->BeginArgumentsObject(object_length);
855 } else {
856 translation->BeginCapturedObject(object_length);
857 }
858 int dematerialized_index = *dematerialized_index_pointer;
859 int env_offset = environment->translation_size() + dematerialized_index;
860 *dematerialized_index_pointer += object_length;
861 for (int i = 0; i < object_length; ++i) {
862 LOperand* value = environment->values()->at(env_offset + i);
863 AddToTranslation(environment,
864 translation,
865 value,
866 environment->HasTaggedValueAt(env_offset + i),
867 environment->HasUint32ValueAt(env_offset + i),
868 object_index_pointer,
869 dematerialized_index_pointer);
870 }
871 return;
872 }
873
874 if (op->IsStackSlot()) {
875 if (is_tagged) {
876 translation->StoreStackSlot(op->index());
877 } else if (is_uint32) {
878 translation->StoreUint32StackSlot(op->index());
879 } else {
880 translation->StoreInt32StackSlot(op->index());
881 }
882 } else if (op->IsDoubleStackSlot()) {
883 translation->StoreDoubleStackSlot(op->index());
884 } else if (op->IsRegister()) {
885 Register reg = ToRegister(op);
886 if (is_tagged) {
887 translation->StoreRegister(reg);
888 } else if (is_uint32) {
889 translation->StoreUint32Register(reg);
890 } else {
891 translation->StoreInt32Register(reg);
892 }
893 } else if (op->IsConstantOperand()) {
894 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
895 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
896 translation->StoreLiteral(src_index);
897 } else {
898 UNREACHABLE();
899 }
900 }
901
902
903 void LCodeGen::CallCodeGeneric(Handle<Code> code,
904 RelocInfo::Mode mode,
905 LInstruction* instr,
906 SafepointMode safepoint_mode) {
907 ASSERT(instr != NULL);
908 __ call(code, mode);
909 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
910
911 // Signal that we don't inline smi code before these stubs in the
912 // optimizing code generator.
913 if (code->kind() == Code::BINARY_OP_IC ||
914 code->kind() == Code::COMPARE_IC) {
915 __ nop();
916 }
917 }
918
919
920 void LCodeGen::CallCode(Handle<Code> code,
921 RelocInfo::Mode mode,
922 LInstruction* instr) {
923 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
924 }
925
926
927 void LCodeGen::CallRuntime(const Runtime::Function* fun,
928 int argc,
929 LInstruction* instr) {
930 ASSERT(instr != NULL);
931 ASSERT(instr->HasPointerMap());
932
933 __ CallRuntime(fun, argc);
934
935 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
936
937 ASSERT(info()->is_calling());
938 }
939
940
941 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
942 if (context->IsRegister()) {
943 if (!ToRegister(context).is(esi)) {
944 __ mov(esi, ToRegister(context));
945 }
946 } else if (context->IsStackSlot()) {
947 __ mov(esi, ToOperand(context));
948 } else if (context->IsConstantOperand()) {
949 HConstant* constant =
950 chunk_->LookupConstant(LConstantOperand::cast(context));
951 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
952 } else {
953 UNREACHABLE();
954 }
955 }
956
957 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
958 int argc,
959 LInstruction* instr,
960 LOperand* context) {
961 LoadContextFromDeferred(context);
962
963 __ CallRuntime(id);
964 RecordSafepointWithRegisters(
965 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
966
967 ASSERT(info()->is_calling());
968 }
969
970
971 void LCodeGen::RegisterEnvironmentForDeoptimization(
972 LEnvironment* environment, Safepoint::DeoptMode mode) {
973 environment->set_has_been_used();
974 if (!environment->HasBeenRegistered()) {
975 // Physical stack frame layout:
976 // -x ............. -4 0 ..................................... y
977 // [incoming arguments] [spill slots] [pushed outgoing arguments]
978
979 // Layout of the environment:
980 // 0 ..................................................... size-1
981 // [parameters] [locals] [expression stack including arguments]
982
983 // Layout of the translation:
984 // 0 ........................................................ size - 1 + 4
985 // [expression stack including arguments] [locals] [4 words] [parameters]
986 // |>------------ translation_size ------------<|
987
988 int frame_count = 0;
989 int jsframe_count = 0;
990 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
991 ++frame_count;
992 if (e->frame_type() == JS_FUNCTION) {
993 ++jsframe_count;
994 }
995 }
996 Translation translation(&translations_, frame_count, jsframe_count, zone());
997 WriteTranslation(environment, &translation);
998 int deoptimization_index = deoptimizations_.length();
999 int pc_offset = masm()->pc_offset();
1000 environment->Register(deoptimization_index,
1001 translation.index(),
1002 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
1003 deoptimizations_.Add(environment, zone());
1004 }
1005 }
1006
1007
1008 void LCodeGen::DeoptimizeIf(Condition cc,
1009 LEnvironment* environment,
1010 Deoptimizer::BailoutType bailout_type) {
1011 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1012 ASSERT(environment->HasBeenRegistered());
1013 int id = environment->deoptimization_index();
1014 ASSERT(info()->IsOptimizing() || info()->IsStub());
1015 Address entry =
1016 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1017 if (entry == NULL) {
1018 Abort(kBailoutWasNotPrepared);
1019 return;
1020 }
1021
1022 if (DeoptEveryNTimes()) {
1023 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1024 Label no_deopt;
1025 __ pushfd();
1026 __ push(eax);
1027 __ mov(eax, Operand::StaticVariable(count));
1028 __ sub(eax, Immediate(1));
1029 __ j(not_zero, &no_deopt, Label::kNear);
1030 if (FLAG_trap_on_deopt) __ int3();
1031 __ mov(eax, Immediate(FLAG_deopt_every_n_times));
1032 __ mov(Operand::StaticVariable(count), eax);
1033 __ pop(eax);
1034 __ popfd();
1035 ASSERT(frame_is_built_);
1036 __ call(entry, RelocInfo::RUNTIME_ENTRY);
1037 __ bind(&no_deopt);
1038 __ mov(Operand::StaticVariable(count), eax);
1039 __ pop(eax);
1040 __ popfd();
1041 }
1042
1043 // Before Instructions which can deopt, we normally flush the x87 stack. But
1044 // we can have inputs or outputs of the current instruction on the stack,
1045 // thus we need to flush them here from the physical stack to leave it in a
1046 // consistent state.
1047 if (x87_stack_.depth() > 0) {
1048 Label done;
1049 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1050 EmitFlushX87ForDeopt();
1051 __ bind(&done);
1052 }
1053
1054 if (info()->ShouldTrapOnDeopt()) {
1055 Label done;
1056 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1057 __ int3();
1058 __ bind(&done);
1059 }
1060
1061 ASSERT(info()->IsStub() || frame_is_built_);
1062 if (cc == no_condition && frame_is_built_) {
1063 __ call(entry, RelocInfo::RUNTIME_ENTRY);
1064 } else {
1065 // We often have several deopts to the same entry, reuse the last
1066 // jump entry if this is the case.
1067 if (jump_table_.is_empty() ||
1068 jump_table_.last().address != entry ||
1069 jump_table_.last().needs_frame != !frame_is_built_ ||
1070 jump_table_.last().bailout_type != bailout_type) {
1071 Deoptimizer::JumpTableEntry table_entry(entry,
1072 bailout_type,
1073 !frame_is_built_);
1074 jump_table_.Add(table_entry, zone());
1075 }
1076 if (cc == no_condition) {
1077 __ jmp(&jump_table_.last().label);
1078 } else {
1079 __ j(cc, &jump_table_.last().label);
1080 }
1081 }
1082 }
1083
1084
1085 void LCodeGen::DeoptimizeIf(Condition cc,
1086 LEnvironment* environment) {
1087 Deoptimizer::BailoutType bailout_type = info()->IsStub()
1088 ? Deoptimizer::LAZY
1089 : Deoptimizer::EAGER;
1090 DeoptimizeIf(cc, environment, bailout_type);
1091 }
1092
1093
1094 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
1095 int length = deoptimizations_.length();
1096 if (length == 0) return;
1097 Handle<DeoptimizationInputData> data =
1098 DeoptimizationInputData::New(isolate(), length, TENURED);
1099
1100 Handle<ByteArray> translations =
1101 translations_.CreateByteArray(isolate()->factory());
1102 data->SetTranslationByteArray(*translations);
1103 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
1104 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
1105 if (info_->IsOptimizing()) {
1106 // Reference to shared function info does not change between phases.
1107 AllowDeferredHandleDereference allow_handle_dereference;
1108 data->SetSharedFunctionInfo(*info_->shared_info());
1109 } else {
1110 data->SetSharedFunctionInfo(Smi::FromInt(0));
1111 }
1112
1113 Handle<FixedArray> literals =
1114 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
1115 { AllowDeferredHandleDereference copy_handles;
1116 for (int i = 0; i < deoptimization_literals_.length(); i++) {
1117 literals->set(i, *deoptimization_literals_[i]);
1118 }
1119 data->SetLiteralArray(*literals);
1120 }
1121
1122 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
1123 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
1124
1125 // Populate the deoptimization entries.
1126 for (int i = 0; i < length; i++) {
1127 LEnvironment* env = deoptimizations_[i];
1128 data->SetAstId(i, env->ast_id());
1129 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
1130 data->SetArgumentsStackHeight(i,
1131 Smi::FromInt(env->arguments_stack_height()));
1132 data->SetPc(i, Smi::FromInt(env->pc_offset()));
1133 }
1134 code->set_deoptimization_data(*data);
1135 }
1136
1137
1138 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
1139 int result = deoptimization_literals_.length();
1140 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
1141 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
1142 }
1143 deoptimization_literals_.Add(literal, zone());
1144 return result;
1145 }
1146
1147
1148 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
1149 ASSERT(deoptimization_literals_.length() == 0);
1150
1151 const ZoneList<Handle<JSFunction> >* inlined_closures =
1152 chunk()->inlined_closures();
1153
1154 for (int i = 0, length = inlined_closures->length();
1155 i < length;
1156 i++) {
1157 DefineDeoptimizationLiteral(inlined_closures->at(i));
1158 }
1159
1160 inlined_function_count_ = deoptimization_literals_.length();
1161 }
1162
1163
1164 void LCodeGen::RecordSafepointWithLazyDeopt(
1165 LInstruction* instr, SafepointMode safepoint_mode) {
1166 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1167 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1168 } else {
1169 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1170 RecordSafepointWithRegisters(
1171 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1172 }
1173 }
1174
1175
1176 void LCodeGen::RecordSafepoint(
1177 LPointerMap* pointers,
1178 Safepoint::Kind kind,
1179 int arguments,
1180 Safepoint::DeoptMode deopt_mode) {
1181 ASSERT(kind == expected_safepoint_kind_);
1182 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1183 Safepoint safepoint =
1184 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1185 for (int i = 0; i < operands->length(); i++) {
1186 LOperand* pointer = operands->at(i);
1187 if (pointer->IsStackSlot()) {
1188 safepoint.DefinePointerSlot(pointer->index(), zone());
1189 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1190 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1191 }
1192 }
1193 }
1194
1195
1196 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1197 Safepoint::DeoptMode mode) {
1198 RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
1199 }
1200
1201
1202 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
1203 LPointerMap empty_pointers(zone());
1204 RecordSafepoint(&empty_pointers, mode);
1205 }
1206
1207
1208 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1209 int arguments,
1210 Safepoint::DeoptMode mode) {
1211 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
1212 }
1213
1214
1215 void LCodeGen::RecordAndWritePosition(int position) {
1216 if (position == RelocInfo::kNoPosition) return;
1217 masm()->positions_recorder()->RecordPosition(position);
1218 masm()->positions_recorder()->WriteRecordedPositions();
1219 }
1220
1221
1222 static const char* LabelType(LLabel* label) {
1223 if (label->is_loop_header()) return " (loop header)";
1224 if (label->is_osr_entry()) return " (OSR entry)";
1225 return "";
1226 }
1227
1228
1229 void LCodeGen::DoLabel(LLabel* label) {
1230 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1231 current_instruction_,
1232 label->hydrogen_value()->id(),
1233 label->block_id(),
1234 LabelType(label));
1235 __ bind(label->label());
1236 current_block_ = label->block_id();
1237 DoGap(label);
1238 }
1239
1240
1241 void LCodeGen::DoParallelMove(LParallelMove* move) {
1242 resolver_.Resolve(move);
1243 }
1244
1245
1246 void LCodeGen::DoGap(LGap* gap) {
1247 for (int i = LGap::FIRST_INNER_POSITION;
1248 i <= LGap::LAST_INNER_POSITION;
1249 i++) {
1250 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1251 LParallelMove* move = gap->GetParallelMove(inner_pos);
1252 if (move != NULL) DoParallelMove(move);
1253 }
1254 }
1255
1256
1257 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1258 DoGap(instr);
1259 }
1260
1261
1262 void LCodeGen::DoParameter(LParameter* instr) {
1263 // Nothing to do.
1264 }
1265
1266
1267 void LCodeGen::DoCallStub(LCallStub* instr) {
1268 ASSERT(ToRegister(instr->context()).is(esi));
1269 ASSERT(ToRegister(instr->result()).is(eax));
1270 switch (instr->hydrogen()->major_key()) {
1271 case CodeStub::RegExpExec: {
1272 RegExpExecStub stub(isolate());
1273 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1274 break;
1275 }
1276 case CodeStub::SubString: {
1277 SubStringStub stub(isolate());
1278 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1279 break;
1280 }
1281 case CodeStub::StringCompare: {
1282 StringCompareStub stub(isolate());
1283 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1284 break;
1285 }
1286 default:
1287 UNREACHABLE();
1288 }
1289 }
1290
1291
1292 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1293 GenerateOsrPrologue();
1294 }
1295
1296
1297 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1298 Register dividend = ToRegister(instr->dividend());
1299 int32_t divisor = instr->divisor();
1300 ASSERT(dividend.is(ToRegister(instr->result())));
1301
1302 // Theoretically, a variation of the branch-free code for integer division by
1303 // a power of 2 (calculating the remainder via an additional multiplication
1304 // (which gets simplified to an 'and') and subtraction) should be faster, and
1305 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1306 // indicate that positive dividends are heavily favored, so the branching
1307 // version performs better.
1308 HMod* hmod = instr->hydrogen();
1309 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
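// Example: for divisor == 8 (or -8) the mask is 7. A negative dividend is
// negated, masked and negated again, so -13 % 8 computes as -(13 & 7) == -5,
// keeping the sign of the dividend as truncated division requires.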
1310 Label dividend_is_not_negative, done;
1311 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1312 __ test(dividend, dividend);
1313 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1314 // Note that this is correct even for kMinInt operands.
1315 __ neg(dividend);
1316 __ and_(dividend, mask);
1317 __ neg(dividend);
1318 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1319 DeoptimizeIf(zero, instr->environment());
1320 }
1321 __ jmp(&done, Label::kNear);
1322 }
1323
1324 __ bind(&dividend_is_not_negative);
1325 __ and_(dividend, mask);
1326 __ bind(&done);
1327 }
1328
1329
1330 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1331 Register dividend = ToRegister(instr->dividend());
1332 int32_t divisor = instr->divisor();
1333 ASSERT(ToRegister(instr->result()).is(eax));
1334
1335 if (divisor == 0) {
1336 DeoptimizeIf(no_condition, instr->environment());
1337 return;
1338 }
1339
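// The code below relies on TruncatingDiv leaving the truncated quotient
// dividend / Abs(divisor) in edx; the remainder is then reconstructed as
// dividend - quotient * Abs(divisor), which has the same sign as the
// dividend.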
1340 __ TruncatingDiv(dividend, Abs(divisor));
1341 __ imul(edx, edx, Abs(divisor));
1342 __ mov(eax, dividend);
1343 __ sub(eax, edx);
1344
1345 // Check for negative zero.
1346 HMod* hmod = instr->hydrogen();
1347 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1348 Label remainder_not_zero;
1349 __ j(not_zero, &remainder_not_zero, Label::kNear);
1350 __ cmp(dividend, Immediate(0));
1351 DeoptimizeIf(less, instr->environment());
1352 __ bind(&remainder_not_zero);
1353 }
1354 }
1355
1356
1357 void LCodeGen::DoModI(LModI* instr) {
1358 HMod* hmod = instr->hydrogen();
1359
1360 Register left_reg = ToRegister(instr->left());
1361 ASSERT(left_reg.is(eax));
1362 Register right_reg = ToRegister(instr->right());
1363 ASSERT(!right_reg.is(eax));
1364 ASSERT(!right_reg.is(edx));
1365 Register result_reg = ToRegister(instr->result());
1366 ASSERT(result_reg.is(edx));
1367
1368 Label done;
1369 // Check for x % 0, idiv would signal a divide error. We have to
1370 // deopt in this case because we can't return a NaN.
1371 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1372 __ test(right_reg, Operand(right_reg));
1373 DeoptimizeIf(zero, instr->environment());
1374 }
1375
1376 // Check for kMinInt % -1, idiv would signal a divide error. We
1377 // have to deopt if we care about -0, because we can't return that.
1378 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1379 Label no_overflow_possible;
1380 __ cmp(left_reg, kMinInt);
1381 __ j(not_equal, &no_overflow_possible, Label::kNear);
1382 __ cmp(right_reg, -1);
1383 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1384 DeoptimizeIf(equal, instr->environment());
1385 } else {
1386 __ j(not_equal, &no_overflow_possible, Label::kNear);
1387 __ Move(result_reg, Immediate(0));
1388 __ jmp(&done, Label::kNear);
1389 }
1390 __ bind(&no_overflow_possible);
1391 }
1392
1393 // Sign extend dividend in eax into edx:eax.
1394 __ cdq();
1395
1396 // If we care about -0, test if the dividend is <0 and the result is 0.
1397 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1398 Label positive_left;
1399 __ test(left_reg, Operand(left_reg));
1400 __ j(not_sign, &positive_left, Label::kNear);
1401 __ idiv(right_reg);
1402 __ test(result_reg, Operand(result_reg));
1403 DeoptimizeIf(zero, instr->environment());
1404 __ jmp(&done, Label::kNear);
1405 __ bind(&positive_left);
1406 }
1407 __ idiv(right_reg);
1408 __ bind(&done);
1409 }
1410
1411
1412 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1413 Register dividend = ToRegister(instr->dividend());
1414 int32_t divisor = instr->divisor();
1415 Register result = ToRegister(instr->result());
1416 ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
1417 ASSERT(!result.is(dividend));
1418
1419 // Check for (0 / -x) that will produce negative zero.
1420 HDiv* hdiv = instr->hydrogen();
1421 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1422 __ test(dividend, dividend);
1423 DeoptimizeIf(zero, instr->environment());
1424 }
1425 // Check for (kMinInt / -1).
1426 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1427 __ cmp(dividend, kMinInt);
1428 DeoptimizeIf(zero, instr->environment());
1429 }
1430 // Deoptimize if remainder will not be 0.
1431 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1432 divisor != 1 && divisor != -1) {
1433 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1434 __ test(dividend, Immediate(mask));
1435 DeoptimizeIf(not_zero, instr->environment());
1436 }
1437 __ Move(result, dividend);
1438 int32_t shift = WhichPowerOf2Abs(divisor);
1439 if (shift > 0) {
1440 // The arithmetic shift is always OK, the 'if' is an optimization only.
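// sar(result, 31) turns result into 0 or -1 depending on the dividend's
// sign, and shr(result, 32 - shift) then leaves 0 or (2^shift - 1). Adding
// that bias to the dividend before the final arithmetic shift makes the
// shift round towards zero instead of towards negative infinity.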
1441 if (shift > 1) __ sar(result, 31);
1442 __ shr(result, 32 - shift);
1443 __ add(result, dividend);
1444 __ sar(result, shift);
1445 }
1446 if (divisor < 0) __ neg(result);
1447 }
1448
1449
1450 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1451 Register dividend = ToRegister(instr->dividend());
1452 int32_t divisor = instr->divisor();
1453 ASSERT(ToRegister(instr->result()).is(edx));
1454
1455 if (divisor == 0) {
1456 DeoptimizeIf(no_condition, instr->environment());
1457 return;
1458 }
1459
1460 // Check for (0 / -x) that will produce negative zero.
1461 HDiv* hdiv = instr->hydrogen();
1462 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1463 __ test(dividend, dividend);
1464 DeoptimizeIf(zero, instr->environment());
1465 }
1466
1467 __ TruncatingDiv(dividend, Abs(divisor));
1468 if (divisor < 0) __ neg(edx);
1469
1470 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1471 __ mov(eax, edx);
1472 __ imul(eax, eax, divisor);
1473 __ sub(eax, dividend);
1474 DeoptimizeIf(not_equal, instr->environment());
1475 }
1476 }
1477
1478
1479 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1480 void LCodeGen::DoDivI(LDivI* instr) {
1481 HBinaryOperation* hdiv = instr->hydrogen();
1482 Register dividend = ToRegister(instr->dividend());
1483 Register divisor = ToRegister(instr->divisor());
1484 Register remainder = ToRegister(instr->temp());
1485 ASSERT(dividend.is(eax));
1486 ASSERT(remainder.is(edx));
1487 ASSERT(ToRegister(instr->result()).is(eax));
1488 ASSERT(!divisor.is(eax));
1489 ASSERT(!divisor.is(edx));
1490
1491 // Check for x / 0.
1492 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1493 __ test(divisor, divisor);
1494 DeoptimizeIf(zero, instr->environment());
1495 }
1496
1497 // Check for (0 / -x) that will produce negative zero.
1498 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1499 Label dividend_not_zero;
1500 __ test(dividend, dividend);
1501 __ j(not_zero, &dividend_not_zero, Label::kNear);
1502 __ test(divisor, divisor);
1503 DeoptimizeIf(sign, instr->environment());
1504 __ bind(&dividend_not_zero);
1505 }
1506
1507 // Check for (kMinInt / -1).
1508 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1509 Label dividend_not_min_int;
1510 __ cmp(dividend, kMinInt);
1511 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1512 __ cmp(divisor, -1);
1513 DeoptimizeIf(zero, instr->environment());
1514 __ bind(&dividend_not_min_int);
1515 }
1516
1517 // Sign extend to edx (= remainder).
1518 __ cdq();
1519 __ idiv(divisor);
1520
1521 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1522 // Deoptimize if remainder is not 0.
1523 __ test(remainder, remainder);
1524 DeoptimizeIf(not_zero, instr->environment());
1525 }
1526 }
1527
1528
1529 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1530 Register dividend = ToRegister(instr->dividend());
1531 int32_t divisor = instr->divisor();
1532 ASSERT(dividend.is(ToRegister(instr->result())));
1533
1534 // If the divisor is positive, things are easy: There can be no deopts and we
1535 // can simply do an arithmetic right shift.
1536 if (divisor == 1) return;
1537 int32_t shift = WhichPowerOf2Abs(divisor);
1538 if (divisor > 1) {
1539 __ sar(dividend, shift);
1540 return;
1541 }
1542
1543 // If the divisor is negative, we have to negate and handle edge cases.
1544 __ neg(dividend);
1545 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1546 DeoptimizeIf(zero, instr->environment());
1547 }
1548
1549 // Dividing by -1 is basically negation, unless we overflow.
1550 if (divisor == -1) {
1551 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1552 DeoptimizeIf(overflow, instr->environment());
1553 }
1554 return;
1555 }
1556
1557 // If the negation could not overflow, simply shifting is OK.
1558 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1559 __ sar(dividend, shift);
1560 return;
1561 }
1562
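// The neg(dividend) above set the overflow flag exactly when the dividend
// was kMinInt (the negation wrapped around); for that single input the
// quotient is loaded as the precomputed constant kMinInt / divisor.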
1563 Label not_kmin_int, done;
1564 __ j(no_overflow, &not_kmin_int, Label::kNear);
1565 __ mov(dividend, Immediate(kMinInt / divisor));
1566 __ jmp(&done, Label::kNear);
1567 __ bind(&not_kmin_int);
1568 __ sar(dividend, shift);
1569 __ bind(&done);
1570 }
1571
1572
1573 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1574 Register dividend = ToRegister(instr->dividend());
1575 int32_t divisor = instr->divisor();
1576 ASSERT(ToRegister(instr->result()).is(edx));
1577
1578 if (divisor == 0) {
1579 DeoptimizeIf(no_condition, instr->environment());
1580 return;
1581 }
1582
1583 // Check for (0 / -x) that will produce negative zero.
1584 HMathFloorOfDiv* hdiv = instr->hydrogen();
1585 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1586 __ test(dividend, dividend);
1587 DeoptimizeIf(zero, instr->environment());
1588 }
1589
1590 // Easy case: We need no dynamic check for the dividend and the flooring
1591 // division is the same as the truncating division.
1592 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1593 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1594 __ TruncatingDiv(dividend, Abs(divisor));
1595 if (divisor < 0) __ neg(edx);
1596 return;
1597 }
1598
1599 // In the general case we may need to adjust before and after the truncating
1600 // division to get a flooring division.
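// When the dividend and divisor have opposite signs, biasing the dividend
// by one towards zero, truncating, and then subtracting one yields the
// floored quotient whether or not the division is exact: for example
// -7 / 2 becomes trunc(-6 / 2) - 1 == -4, and -6 / 2 becomes
// trunc(-5 / 2) - 1 == -3.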
1601 Register temp = ToRegister(instr->temp3());
1602 ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
1603 Label needs_adjustment, done;
1604 __ cmp(dividend, Immediate(0));
1605 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1606 __ TruncatingDiv(dividend, Abs(divisor));
1607 if (divisor < 0) __ neg(edx);
1608 __ jmp(&done, Label::kNear);
1609 __ bind(&needs_adjustment);
1610 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1611 __ TruncatingDiv(temp, Abs(divisor));
1612 if (divisor < 0) __ neg(edx);
1613 __ dec(edx);
1614 __ bind(&done);
1615 }
1616
1617
1618 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1619 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1620 HBinaryOperation* hdiv = instr->hydrogen();
1621 Register dividend = ToRegister(instr->dividend());
1622 Register divisor = ToRegister(instr->divisor());
1623 Register remainder = ToRegister(instr->temp());
1624 Register result = ToRegister(instr->result());
1625 ASSERT(dividend.is(eax));
1626 ASSERT(remainder.is(edx));
1627 ASSERT(result.is(eax));
1628 ASSERT(!divisor.is(eax));
1629 ASSERT(!divisor.is(edx));
1630
1631 // Check for x / 0.
1632 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1633 __ test(divisor, divisor);
1634 DeoptimizeIf(zero, instr->environment());
1635 }
1636
1637 // Check for (0 / -x) that will produce negative zero.
1638 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1639 Label dividend_not_zero;
1640 __ test(dividend, dividend);
1641 __ j(not_zero, &dividend_not_zero, Label::kNear);
1642 __ test(divisor, divisor);
1643 DeoptimizeIf(sign, instr->environment());
1644 __ bind(&dividend_not_zero);
1645 }
1646
1647 // Check for (kMinInt / -1).
1648 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1649 Label dividend_not_min_int;
1650 __ cmp(dividend, kMinInt);
1651     __ j(not_zero, &dividend_not_min_int, Label::kNear);
1652 __ cmp(divisor, -1);
1653 DeoptimizeIf(zero, instr->environment());
1654     __ bind(&dividend_not_min_int);
1655 }
1656
1657 // Sign extend to edx (= remainder).
1658 __ cdq();
1659 __ idiv(divisor);
1660
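  // idiv leaves the truncated quotient in eax and the remainder in edx. The
  // quotient must be decremented by one to floor it whenever the remainder is
  // non-zero and its sign differs from the divisor's sign.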
1661 Label done;
1662 __ test(remainder, remainder);
1663 __ j(zero, &done, Label::kNear);
1664 __ xor_(remainder, divisor);
1665 __ sar(remainder, 31);
1666 __ add(result, remainder);
1667 __ bind(&done);
1668 }
1669
1670
DoMulI(LMulI * instr)1671 void LCodeGen::DoMulI(LMulI* instr) {
1672 Register left = ToRegister(instr->left());
1673 LOperand* right = instr->right();
1674
1675 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1676 __ mov(ToRegister(instr->temp()), left);
1677 }
1678
1679 if (right->IsConstantOperand()) {
1680 // Try strength reductions on the multiplication.
1681 // All replacement instructions are at most as long as the imul
1682 // and have better latency.
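    // For example, a multiplication by 9 becomes a single lea with a scaled
    // index, and a multiplication by a power of two becomes a shift (see the
    // switch below).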
1683 int constant = ToInteger32(LConstantOperand::cast(right));
1684 if (constant == -1) {
1685 __ neg(left);
1686 } else if (constant == 0) {
1687 __ xor_(left, Operand(left));
1688 } else if (constant == 2) {
1689 __ add(left, Operand(left));
1690 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1691 // If we know that the multiplication can't overflow, it's safe to
1692 // use instructions that don't set the overflow flag for the
1693 // multiplication.
1694 switch (constant) {
1695 case 1:
1696 // Do nothing.
1697 break;
1698 case 3:
1699 __ lea(left, Operand(left, left, times_2, 0));
1700 break;
1701 case 4:
1702 __ shl(left, 2);
1703 break;
1704 case 5:
1705 __ lea(left, Operand(left, left, times_4, 0));
1706 break;
1707 case 8:
1708 __ shl(left, 3);
1709 break;
1710 case 9:
1711 __ lea(left, Operand(left, left, times_8, 0));
1712 break;
1713 case 16:
1714 __ shl(left, 4);
1715 break;
1716 default:
1717 __ imul(left, left, constant);
1718 break;
1719 }
1720 } else {
1721 __ imul(left, left, constant);
1722 }
1723 } else {
1724 if (instr->hydrogen()->representation().IsSmi()) {
1725 __ SmiUntag(left);
1726 }
1727 __ imul(left, ToOperand(right));
1728 }
1729
1730 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1731 DeoptimizeIf(overflow, instr->environment());
1732 }
1733
1734 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1735 // Bail out if the result is supposed to be negative zero.
1736 Label done;
1737 __ test(left, Operand(left));
1738 __ j(not_zero, &done, Label::kNear);
1739 if (right->IsConstantOperand()) {
1740 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1741 DeoptimizeIf(no_condition, instr->environment());
1742 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1743 __ cmp(ToRegister(instr->temp()), Immediate(0));
1744 DeoptimizeIf(less, instr->environment());
1745 }
1746 } else {
1747 // Test the non-zero operand for negative sign.
1748 __ or_(ToRegister(instr->temp()), ToOperand(right));
1749 DeoptimizeIf(sign, instr->environment());
1750 }
1751 __ bind(&done);
1752 }
1753 }
1754
1755
DoBitI(LBitI * instr)1756 void LCodeGen::DoBitI(LBitI* instr) {
1757 LOperand* left = instr->left();
1758 LOperand* right = instr->right();
1759 ASSERT(left->Equals(instr->result()));
1760 ASSERT(left->IsRegister());
1761
1762 if (right->IsConstantOperand()) {
1763 int32_t right_operand =
1764 ToRepresentation(LConstantOperand::cast(right),
1765 instr->hydrogen()->representation());
1766 switch (instr->op()) {
1767 case Token::BIT_AND:
1768 __ and_(ToRegister(left), right_operand);
1769 break;
1770 case Token::BIT_OR:
1771 __ or_(ToRegister(left), right_operand);
1772 break;
1773 case Token::BIT_XOR:
1774 if (right_operand == int32_t(~0)) {
1775 __ not_(ToRegister(left));
1776 } else {
1777 __ xor_(ToRegister(left), right_operand);
1778 }
1779 break;
1780 default:
1781 UNREACHABLE();
1782 break;
1783 }
1784 } else {
1785 switch (instr->op()) {
1786 case Token::BIT_AND:
1787 __ and_(ToRegister(left), ToOperand(right));
1788 break;
1789 case Token::BIT_OR:
1790 __ or_(ToRegister(left), ToOperand(right));
1791 break;
1792 case Token::BIT_XOR:
1793 __ xor_(ToRegister(left), ToOperand(right));
1794 break;
1795 default:
1796 UNREACHABLE();
1797 break;
1798 }
1799 }
1800 }
1801
1802
DoShiftI(LShiftI * instr)1803 void LCodeGen::DoShiftI(LShiftI* instr) {
1804 LOperand* left = instr->left();
1805 LOperand* right = instr->right();
1806 ASSERT(left->Equals(instr->result()));
1807 ASSERT(left->IsRegister());
1808 if (right->IsRegister()) {
1809 ASSERT(ToRegister(right).is(ecx));
1810
1811 switch (instr->op()) {
1812 case Token::ROR:
1813 __ ror_cl(ToRegister(left));
1814 if (instr->can_deopt()) {
1815 __ test(ToRegister(left), ToRegister(left));
1816 DeoptimizeIf(sign, instr->environment());
1817 }
1818 break;
1819 case Token::SAR:
1820 __ sar_cl(ToRegister(left));
1821 break;
1822 case Token::SHR:
1823 __ shr_cl(ToRegister(left));
1824 if (instr->can_deopt()) {
1825 __ test(ToRegister(left), ToRegister(left));
1826 DeoptimizeIf(sign, instr->environment());
1827 }
1828 break;
1829 case Token::SHL:
1830 __ shl_cl(ToRegister(left));
1831 break;
1832 default:
1833 UNREACHABLE();
1834 break;
1835 }
1836 } else {
1837 int value = ToInteger32(LConstantOperand::cast(right));
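    // Only the low five bits of the shift count matter, matching both
    // JavaScript shift semantics and the ia32 shift instructions.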
1838 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1839 switch (instr->op()) {
1840 case Token::ROR:
1841 if (shift_count == 0 && instr->can_deopt()) {
1842 __ test(ToRegister(left), ToRegister(left));
1843 DeoptimizeIf(sign, instr->environment());
1844 } else {
1845 __ ror(ToRegister(left), shift_count);
1846 }
1847 break;
1848 case Token::SAR:
1849 if (shift_count != 0) {
1850 __ sar(ToRegister(left), shift_count);
1851 }
1852 break;
1853 case Token::SHR:
1854 if (shift_count != 0) {
1855 __ shr(ToRegister(left), shift_count);
1856 } else if (instr->can_deopt()) {
1857 __ test(ToRegister(left), ToRegister(left));
1858 DeoptimizeIf(sign, instr->environment());
1859 }
1860 break;
1861 case Token::SHL:
1862 if (shift_count != 0) {
1863 if (instr->hydrogen_value()->representation().IsSmi() &&
1864 instr->can_deopt()) {
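          // Emit all but one of the shifts explicitly and let SmiTag do the
          // final doubling, so the overflow flag tells us whether the result
          // still fits in a smi.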
1865 if (shift_count != 1) {
1866 __ shl(ToRegister(left), shift_count - 1);
1867 }
1868 __ SmiTag(ToRegister(left));
1869 DeoptimizeIf(overflow, instr->environment());
1870 } else {
1871 __ shl(ToRegister(left), shift_count);
1872 }
1873 }
1874 break;
1875 default:
1876 UNREACHABLE();
1877 break;
1878 }
1879 }
1880 }
1881
1882
DoSubI(LSubI * instr)1883 void LCodeGen::DoSubI(LSubI* instr) {
1884 LOperand* left = instr->left();
1885 LOperand* right = instr->right();
1886 ASSERT(left->Equals(instr->result()));
1887
1888 if (right->IsConstantOperand()) {
1889 __ sub(ToOperand(left),
1890 ToImmediate(right, instr->hydrogen()->representation()));
1891 } else {
1892 __ sub(ToRegister(left), ToOperand(right));
1893 }
1894 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1895 DeoptimizeIf(overflow, instr->environment());
1896 }
1897 }
1898
1899
DoConstantI(LConstantI * instr)1900 void LCodeGen::DoConstantI(LConstantI* instr) {
1901 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1902 }
1903
1904
DoConstantS(LConstantS * instr)1905 void LCodeGen::DoConstantS(LConstantS* instr) {
1906 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1907 }
1908
1909
DoConstantD(LConstantD * instr)1910 void LCodeGen::DoConstantD(LConstantD* instr) {
1911 double v = instr->value();
1912 uint64_t int_val = BitCast<uint64_t, double>(v);
1913 int32_t lower = static_cast<int32_t>(int_val);
1914 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1915 ASSERT(instr->result()->IsDoubleRegister());
1916
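  // Assemble the double on the stack: pushing the upper word first and then
  // the lower word leaves the lower word at the lower address, which is the
  // little-endian layout the FPU load below expects. The slot is popped again
  // right after the load.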
1917 __ push(Immediate(upper));
1918 __ push(Immediate(lower));
1919 X87Register reg = ToX87Register(instr->result());
1920 X87Mov(reg, Operand(esp, 0));
1921 __ add(Operand(esp), Immediate(kDoubleSize));
1922 }
1923
1924
DoConstantE(LConstantE * instr)1925 void LCodeGen::DoConstantE(LConstantE* instr) {
1926 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
1927 }
1928
1929
DoConstantT(LConstantT * instr)1930 void LCodeGen::DoConstantT(LConstantT* instr) {
1931 Register reg = ToRegister(instr->result());
1932 Handle<Object> object = instr->value(isolate());
1933 AllowDeferredHandleDereference smi_check;
1934 __ LoadObject(reg, object);
1935 }
1936
1937
DoMapEnumLength(LMapEnumLength * instr)1938 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1939 Register result = ToRegister(instr->result());
1940 Register map = ToRegister(instr->value());
1941 __ EnumLength(result, map);
1942 }
1943
1944
DoDateField(LDateField * instr)1945 void LCodeGen::DoDateField(LDateField* instr) {
1946 Register object = ToRegister(instr->date());
1947 Register result = ToRegister(instr->result());
1948 Register scratch = ToRegister(instr->temp());
1949 Smi* index = instr->index();
1950 Label runtime, done;
1951 ASSERT(object.is(result));
1952 ASSERT(object.is(eax));
1953
1954 __ test(object, Immediate(kSmiTagMask));
1955 DeoptimizeIf(zero, instr->environment());
1956 __ CmpObjectType(object, JS_DATE_TYPE, scratch);
1957 DeoptimizeIf(not_equal, instr->environment());
1958
1959 if (index->value() == 0) {
1960 __ mov(result, FieldOperand(object, JSDate::kValueOffset));
1961 } else {
1962 if (index->value() < JSDate::kFirstUncachedField) {
1963 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1964 __ mov(scratch, Operand::StaticVariable(stamp));
1965 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
1966 __ j(not_equal, &runtime, Label::kNear);
1967 __ mov(result, FieldOperand(object, JSDate::kValueOffset +
1968 kPointerSize * index->value()));
1969 __ jmp(&done, Label::kNear);
1970 }
1971 __ bind(&runtime);
1972 __ PrepareCallCFunction(2, scratch);
1973 __ mov(Operand(esp, 0), object);
1974 __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
1975 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1976 __ bind(&done);
1977 }
1978 }
1979
1980
BuildSeqStringOperand(Register string,LOperand * index,String::Encoding encoding)1981 Operand LCodeGen::BuildSeqStringOperand(Register string,
1982 LOperand* index,
1983 String::Encoding encoding) {
1984 if (index->IsConstantOperand()) {
1985 int offset = ToRepresentation(LConstantOperand::cast(index),
1986 Representation::Integer32());
1987 if (encoding == String::TWO_BYTE_ENCODING) {
1988 offset *= kUC16Size;
1989 }
1990 STATIC_ASSERT(kCharSize == 1);
1991 return FieldOperand(string, SeqString::kHeaderSize + offset);
1992 }
1993 return FieldOperand(
1994 string, ToRegister(index),
1995 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1996 SeqString::kHeaderSize);
1997 }
1998
1999
DoSeqStringGetChar(LSeqStringGetChar * instr)2000 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2001 String::Encoding encoding = instr->hydrogen()->encoding();
2002 Register result = ToRegister(instr->result());
2003 Register string = ToRegister(instr->string());
2004
2005 if (FLAG_debug_code) {
2006 __ push(string);
2007 __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2008 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2009
2010 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2011 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2012 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2013 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2014 ? one_byte_seq_type : two_byte_seq_type));
2015 __ Check(equal, kUnexpectedStringType);
2016 __ pop(string);
2017 }
2018
2019 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2020 if (encoding == String::ONE_BYTE_ENCODING) {
2021 __ movzx_b(result, operand);
2022 } else {
2023 __ movzx_w(result, operand);
2024 }
2025 }
2026
2027
DoSeqStringSetChar(LSeqStringSetChar * instr)2028 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2029 String::Encoding encoding = instr->hydrogen()->encoding();
2030 Register string = ToRegister(instr->string());
2031
2032 if (FLAG_debug_code) {
2033 Register value = ToRegister(instr->value());
2034 Register index = ToRegister(instr->index());
2035 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2036 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2037 int encoding_mask =
2038 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2039 ? one_byte_seq_type : two_byte_seq_type;
2040 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2041 }
2042
2043 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2044 if (instr->value()->IsConstantOperand()) {
2045 int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2046 Representation::Integer32());
2047 ASSERT_LE(0, value);
2048 if (encoding == String::ONE_BYTE_ENCODING) {
2049 ASSERT_LE(value, String::kMaxOneByteCharCode);
2050 __ mov_b(operand, static_cast<int8_t>(value));
2051 } else {
2052 ASSERT_LE(value, String::kMaxUtf16CodeUnit);
2053 __ mov_w(operand, static_cast<int16_t>(value));
2054 }
2055 } else {
2056 Register value = ToRegister(instr->value());
2057 if (encoding == String::ONE_BYTE_ENCODING) {
2058 __ mov_b(operand, value);
2059 } else {
2060 __ mov_w(operand, value);
2061 }
2062 }
2063 }
2064
2065
DoAddI(LAddI * instr)2066 void LCodeGen::DoAddI(LAddI* instr) {
2067 LOperand* left = instr->left();
2068 LOperand* right = instr->right();
2069
2070 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2071 if (right->IsConstantOperand()) {
2072 int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2073 instr->hydrogen()->representation());
2074 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2075 } else {
2076 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2077 __ lea(ToRegister(instr->result()), address);
2078 }
2079 } else {
2080 if (right->IsConstantOperand()) {
2081 __ add(ToOperand(left),
2082 ToImmediate(right, instr->hydrogen()->representation()));
2083 } else {
2084 __ add(ToRegister(left), ToOperand(right));
2085 }
2086 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2087 DeoptimizeIf(overflow, instr->environment());
2088 }
2089 }
2090 }
2091
2092
DoMathMinMax(LMathMinMax * instr)2093 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2094 LOperand* left = instr->left();
2095 LOperand* right = instr->right();
2096 ASSERT(left->Equals(instr->result()));
2097 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2098 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2099 Label return_left;
2100 Condition condition = (operation == HMathMinMax::kMathMin)
2101 ? less_equal
2102 : greater_equal;
2103 if (right->IsConstantOperand()) {
2104 Operand left_op = ToOperand(left);
2105 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2106 instr->hydrogen()->representation());
2107 __ cmp(left_op, immediate);
2108 __ j(condition, &return_left, Label::kNear);
2109 __ mov(left_op, immediate);
2110 } else {
2111 Register left_reg = ToRegister(left);
2112 Operand right_op = ToOperand(right);
2113 __ cmp(left_reg, right_op);
2114 __ j(condition, &return_left, Label::kNear);
2115 __ mov(left_reg, right_op);
2116 }
2117 __ bind(&return_left);
2118 } else {
2119 // TODO(weiliang) use X87 for double representation.
2120 UNIMPLEMENTED();
2121 }
2122 }
2123
2124
DoArithmeticD(LArithmeticD * instr)2125 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2126 X87Register left = ToX87Register(instr->left());
2127 X87Register right = ToX87Register(instr->right());
2128 X87Register result = ToX87Register(instr->result());
2129 if (instr->op() != Token::MOD) {
2130 X87PrepareBinaryOp(left, right, result);
2131 }
2132 switch (instr->op()) {
2133 case Token::ADD:
2134 __ fadd_i(1);
2135 break;
2136 case Token::SUB:
2137 __ fsub_i(1);
2138 break;
2139 case Token::MUL:
2140 __ fmul_i(1);
2141 break;
2142 case Token::DIV:
2143 __ fdiv_i(1);
2144 break;
2145 case Token::MOD: {
2146 // Pass two doubles as arguments on the stack.
2147 __ PrepareCallCFunction(4, eax);
2148 X87Mov(Operand(esp, 1 * kDoubleSize), right);
2149 X87Mov(Operand(esp, 0), left);
2150 X87Free(right);
2151 ASSERT(left.is(result));
2152 X87PrepareToWrite(result);
2153 __ CallCFunction(
2154 ExternalReference::mod_two_doubles_operation(isolate()),
2155 4);
2156
2157 // Return value is in st(0) on ia32.
2158 X87CommitWrite(result);
2159 break;
2160 }
2161 default:
2162 UNREACHABLE();
2163 break;
2164 }
2165 }
2166
2167
DoArithmeticT(LArithmeticT * instr)2168 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2169 ASSERT(ToRegister(instr->context()).is(esi));
2170 ASSERT(ToRegister(instr->left()).is(edx));
2171 ASSERT(ToRegister(instr->right()).is(eax));
2172 ASSERT(ToRegister(instr->result()).is(eax));
2173
2174 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
2175 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2176 }
2177
2178
2179 template<class InstrType>
EmitBranch(InstrType instr,Condition cc)2180 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2181 int left_block = instr->TrueDestination(chunk_);
2182 int right_block = instr->FalseDestination(chunk_);
2183
2184 int next_block = GetNextEmittedBlock();
2185
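  // Emit the cheapest control flow: fall through when one target is the next
  // emitted block and branch only for the other one; otherwise emit a
  // conditional branch followed by an unconditional jump.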
2186 if (right_block == left_block || cc == no_condition) {
2187 EmitGoto(left_block);
2188 } else if (left_block == next_block) {
2189 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2190 } else if (right_block == next_block) {
2191 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2192 } else {
2193 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2194 __ jmp(chunk_->GetAssemblyLabel(right_block));
2195 }
2196 }
2197
2198
2199 template<class InstrType>
EmitFalseBranch(InstrType instr,Condition cc)2200 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2201 int false_block = instr->FalseDestination(chunk_);
2202 if (cc == no_condition) {
2203 __ jmp(chunk_->GetAssemblyLabel(false_block));
2204 } else {
2205 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2206 }
2207 }
2208
2209
DoBranch(LBranch * instr)2210 void LCodeGen::DoBranch(LBranch* instr) {
2211 Representation r = instr->hydrogen()->value()->representation();
2212 if (r.IsSmiOrInteger32()) {
2213 Register reg = ToRegister(instr->value());
2214 __ test(reg, Operand(reg));
2215 EmitBranch(instr, not_zero);
2216 } else if (r.IsDouble()) {
2217 UNREACHABLE();
2218 } else {
2219 ASSERT(r.IsTagged());
2220 Register reg = ToRegister(instr->value());
2221 HType type = instr->hydrogen()->value()->type();
2222 if (type.IsBoolean()) {
2223 ASSERT(!info()->IsStub());
2224 __ cmp(reg, factory()->true_value());
2225 EmitBranch(instr, equal);
2226 } else if (type.IsSmi()) {
2227 ASSERT(!info()->IsStub());
2228 __ test(reg, Operand(reg));
2229 EmitBranch(instr, not_equal);
2230 } else if (type.IsJSArray()) {
2231 ASSERT(!info()->IsStub());
2232 EmitBranch(instr, no_condition);
2233 } else if (type.IsHeapNumber()) {
2234 UNREACHABLE();
2235 } else if (type.IsString()) {
2236 ASSERT(!info()->IsStub());
2237 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2238 EmitBranch(instr, not_equal);
2239 } else {
2240 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2241 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2242
2243 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2244 // undefined -> false.
2245 __ cmp(reg, factory()->undefined_value());
2246 __ j(equal, instr->FalseLabel(chunk_));
2247 }
2248 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2249 // true -> true.
2250 __ cmp(reg, factory()->true_value());
2251 __ j(equal, instr->TrueLabel(chunk_));
2252 // false -> false.
2253 __ cmp(reg, factory()->false_value());
2254 __ j(equal, instr->FalseLabel(chunk_));
2255 }
2256 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2257 // 'null' -> false.
2258 __ cmp(reg, factory()->null_value());
2259 __ j(equal, instr->FalseLabel(chunk_));
2260 }
2261
2262 if (expected.Contains(ToBooleanStub::SMI)) {
2263 // Smis: 0 -> false, all other -> true.
2264 __ test(reg, Operand(reg));
2265 __ j(equal, instr->FalseLabel(chunk_));
2266 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2267 } else if (expected.NeedsMap()) {
2268 // If we need a map later and have a Smi -> deopt.
2269 __ test(reg, Immediate(kSmiTagMask));
2270 DeoptimizeIf(zero, instr->environment());
2271 }
2272
2273 Register map = no_reg; // Keep the compiler happy.
2274 if (expected.NeedsMap()) {
2275 map = ToRegister(instr->temp());
2276 ASSERT(!map.is(reg));
2277 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2278
2279 if (expected.CanBeUndetectable()) {
2280 // Undetectable -> false.
2281 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2282 1 << Map::kIsUndetectable);
2283 __ j(not_zero, instr->FalseLabel(chunk_));
2284 }
2285 }
2286
2287 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2288 // spec object -> true.
2289 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2290 __ j(above_equal, instr->TrueLabel(chunk_));
2291 }
2292
2293 if (expected.Contains(ToBooleanStub::STRING)) {
2294 // String value -> false iff empty.
2295 Label not_string;
2296 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2297         __ j(above_equal, &not_string, Label::kNear);
2298 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2299 __ j(not_zero, instr->TrueLabel(chunk_));
2300 __ jmp(instr->FalseLabel(chunk_));
2301         __ bind(&not_string);
2302 }
2303
2304 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2305 // Symbol value -> true.
2306 __ CmpInstanceType(map, SYMBOL_TYPE);
2307 __ j(equal, instr->TrueLabel(chunk_));
2308 }
2309
2310 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2311 // heap number -> false iff +0, -0, or NaN.
2312 Label not_heap_number;
2313 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2314 factory()->heap_number_map());
2315         __ j(not_equal, &not_heap_number, Label::kNear);
2316 __ fldz();
2317 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2318 __ FCmp();
2319 __ j(zero, instr->FalseLabel(chunk_));
2320 __ jmp(instr->TrueLabel(chunk_));
2321         __ bind(&not_heap_number);
2322 }
2323
2324 if (!expected.IsGeneric()) {
2325 // We've seen something for the first time -> deopt.
2326 // This can only happen if we are not generic already.
2327 DeoptimizeIf(no_condition, instr->environment());
2328 }
2329 }
2330 }
2331 }
2332
2333
EmitGoto(int block)2334 void LCodeGen::EmitGoto(int block) {
2335 if (!IsNextEmittedBlock(block)) {
2336 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2337 }
2338 }
2339
2340
DoClobberDoubles(LClobberDoubles * instr)2341 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2342 }
2343
2344
DoGoto(LGoto * instr)2345 void LCodeGen::DoGoto(LGoto* instr) {
2346 EmitGoto(instr->block_id());
2347 }
2348
2349
TokenToCondition(Token::Value op,bool is_unsigned)2350 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2351 Condition cond = no_condition;
2352 switch (op) {
2353 case Token::EQ:
2354 case Token::EQ_STRICT:
2355 cond = equal;
2356 break;
2357 case Token::NE:
2358 case Token::NE_STRICT:
2359 cond = not_equal;
2360 break;
2361 case Token::LT:
2362 cond = is_unsigned ? below : less;
2363 break;
2364 case Token::GT:
2365 cond = is_unsigned ? above : greater;
2366 break;
2367 case Token::LTE:
2368 cond = is_unsigned ? below_equal : less_equal;
2369 break;
2370 case Token::GTE:
2371 cond = is_unsigned ? above_equal : greater_equal;
2372 break;
2373 case Token::IN:
2374 case Token::INSTANCEOF:
2375 default:
2376 UNREACHABLE();
2377 }
2378 return cond;
2379 }
2380
2381
DoCompareNumericAndBranch(LCompareNumericAndBranch * instr)2382 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2383 LOperand* left = instr->left();
2384 LOperand* right = instr->right();
2385 bool is_unsigned =
2386 instr->is_double() ||
2387 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2388 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2389 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2390
2391 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2392 // We can statically evaluate the comparison.
2393 double left_val = ToDouble(LConstantOperand::cast(left));
2394 double right_val = ToDouble(LConstantOperand::cast(right));
2395 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2396 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2397 EmitGoto(next_block);
2398 } else {
2399 if (instr->is_double()) {
2400 X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2401 __ FCmp();
2402 // Don't base result on EFLAGS when a NaN is involved. Instead
2403 // jump to the false block.
2404 __ j(parity_even, instr->FalseLabel(chunk_));
2405 } else {
2406 if (right->IsConstantOperand()) {
2407 __ cmp(ToOperand(left),
2408 ToImmediate(right, instr->hydrogen()->representation()));
2409 } else if (left->IsConstantOperand()) {
2410 __ cmp(ToOperand(right),
2411 ToImmediate(left, instr->hydrogen()->representation()));
2412 // We commuted the operands, so commute the condition.
2413 cc = CommuteCondition(cc);
2414 } else {
2415 __ cmp(ToRegister(left), ToOperand(right));
2416 }
2417 }
2418 EmitBranch(instr, cc);
2419 }
2420 }
2421
2422
DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch * instr)2423 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2424 Register left = ToRegister(instr->left());
2425
2426 if (instr->right()->IsConstantOperand()) {
2427 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2428 __ CmpObject(left, right);
2429 } else {
2430 Operand right = ToOperand(instr->right());
2431 __ cmp(left, right);
2432 }
2433 EmitBranch(instr, equal);
2434 }
2435
2436
DoCmpHoleAndBranch(LCmpHoleAndBranch * instr)2437 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2438 if (instr->hydrogen()->representation().IsTagged()) {
2439 Register input_reg = ToRegister(instr->object());
2440 __ cmp(input_reg, factory()->the_hole_value());
2441 EmitBranch(instr, equal);
2442 return;
2443 }
2444
2445   // Put the value on top of the stack.
2446 X87Register src = ToX87Register(instr->object());
2447 X87LoadForUsage(src);
2448 __ fld(0);
2449 __ fld(0);
2450 __ FCmp();
2451 Label ok;
2452 __ j(parity_even, &ok, Label::kNear);
2453 __ fstp(0);
2454 EmitFalseBranch(instr, no_condition);
2455 __ bind(&ok);
2456
2457
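  // The hole is a NaN with a distinguished upper word (kHoleNanUpper32).
  // Spill the value and compare only the upper 32 bits; note that the cmp
  // reads the just-released slot below esp after it has been popped.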
2458 __ sub(esp, Immediate(kDoubleSize));
2459 __ fstp_d(MemOperand(esp, 0));
2460
2461 __ add(esp, Immediate(kDoubleSize));
2462 int offset = sizeof(kHoleNanUpper32);
2463 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2464 EmitBranch(instr, equal);
2465 }
2466
2467
DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch * instr)2468 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2469 Representation rep = instr->hydrogen()->value()->representation();
2470 ASSERT(!rep.IsInteger32());
2471
2472 if (rep.IsDouble()) {
2473 UNREACHABLE();
2474 } else {
2475 Register value = ToRegister(instr->value());
2476 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2477 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
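    // Minus zero has 0x80000000 in the exponent word and zero in the mantissa
    // word. Comparing the exponent word against 1 sets the overflow flag only
    // for 0x80000000, so any other value falls through to the false branch.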
2478 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2479 Immediate(0x1));
2480 EmitFalseBranch(instr, no_overflow);
2481 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2482 Immediate(0x00000000));
2483 EmitBranch(instr, equal);
2484 }
2485 }
2486
2487
EmitIsObject(Register input,Register temp1,Label * is_not_object,Label * is_object)2488 Condition LCodeGen::EmitIsObject(Register input,
2489 Register temp1,
2490 Label* is_not_object,
2491 Label* is_object) {
2492 __ JumpIfSmi(input, is_not_object);
2493
2494 __ cmp(input, isolate()->factory()->null_value());
2495 __ j(equal, is_object);
2496
2497 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2498 // Undetectable objects behave like undefined.
2499 __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2500 1 << Map::kIsUndetectable);
2501 __ j(not_zero, is_not_object);
2502
2503 __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2504 __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2505 __ j(below, is_not_object);
2506 __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2507 return below_equal;
2508 }
2509
2510
DoIsObjectAndBranch(LIsObjectAndBranch * instr)2511 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2512 Register reg = ToRegister(instr->value());
2513 Register temp = ToRegister(instr->temp());
2514
2515 Condition true_cond = EmitIsObject(
2516 reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2517
2518 EmitBranch(instr, true_cond);
2519 }
2520
2521
EmitIsString(Register input,Register temp1,Label * is_not_string,SmiCheck check_needed=INLINE_SMI_CHECK)2522 Condition LCodeGen::EmitIsString(Register input,
2523 Register temp1,
2524 Label* is_not_string,
2525 SmiCheck check_needed = INLINE_SMI_CHECK) {
2526 if (check_needed == INLINE_SMI_CHECK) {
2527 __ JumpIfSmi(input, is_not_string);
2528 }
2529
2530 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2531
2532 return cond;
2533 }
2534
2535
DoIsStringAndBranch(LIsStringAndBranch * instr)2536 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2537 Register reg = ToRegister(instr->value());
2538 Register temp = ToRegister(instr->temp());
2539
2540 SmiCheck check_needed =
2541 instr->hydrogen()->value()->type().IsHeapObject()
2542 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2543
2544 Condition true_cond = EmitIsString(
2545 reg, temp, instr->FalseLabel(chunk_), check_needed);
2546
2547 EmitBranch(instr, true_cond);
2548 }
2549
2550
DoIsSmiAndBranch(LIsSmiAndBranch * instr)2551 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2552 Operand input = ToOperand(instr->value());
2553
2554 __ test(input, Immediate(kSmiTagMask));
2555 EmitBranch(instr, zero);
2556 }
2557
2558
DoIsUndetectableAndBranch(LIsUndetectableAndBranch * instr)2559 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2560 Register input = ToRegister(instr->value());
2561 Register temp = ToRegister(instr->temp());
2562
2563 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2564 STATIC_ASSERT(kSmiTag == 0);
2565 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2566 }
2567 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2568 __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2569 1 << Map::kIsUndetectable);
2570 EmitBranch(instr, not_zero);
2571 }
2572
2573
ComputeCompareCondition(Token::Value op)2574 static Condition ComputeCompareCondition(Token::Value op) {
2575 switch (op) {
2576 case Token::EQ_STRICT:
2577 case Token::EQ:
2578 return equal;
2579 case Token::LT:
2580 return less;
2581 case Token::GT:
2582 return greater;
2583 case Token::LTE:
2584 return less_equal;
2585 case Token::GTE:
2586 return greater_equal;
2587 default:
2588 UNREACHABLE();
2589 return no_condition;
2590 }
2591 }
2592
2593
DoStringCompareAndBranch(LStringCompareAndBranch * instr)2594 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2595 Token::Value op = instr->op();
2596
2597 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2598 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2599
2600 Condition condition = ComputeCompareCondition(op);
2601 __ test(eax, Operand(eax));
2602
2603 EmitBranch(instr, condition);
2604 }
2605
2606
TestType(HHasInstanceTypeAndBranch * instr)2607 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2608 InstanceType from = instr->from();
2609 InstanceType to = instr->to();
2610 if (from == FIRST_TYPE) return to;
2611 ASSERT(from == to || to == LAST_TYPE);
2612 return from;
2613 }
2614
2615
BranchCondition(HHasInstanceTypeAndBranch * instr)2616 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2617 InstanceType from = instr->from();
2618 InstanceType to = instr->to();
2619 if (from == to) return equal;
2620 if (to == LAST_TYPE) return above_equal;
2621 if (from == FIRST_TYPE) return below_equal;
2622 UNREACHABLE();
2623 return equal;
2624 }
2625
2626
DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch * instr)2627 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2628 Register input = ToRegister(instr->value());
2629 Register temp = ToRegister(instr->temp());
2630
2631 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2632 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2633 }
2634
2635 __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2636 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2637 }
2638
2639
DoGetCachedArrayIndex(LGetCachedArrayIndex * instr)2640 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2641 Register input = ToRegister(instr->value());
2642 Register result = ToRegister(instr->result());
2643
2644 __ AssertString(input);
2645
2646 __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2647 __ IndexFromHash(result, result);
2648 }
2649
2650
DoHasCachedArrayIndexAndBranch(LHasCachedArrayIndexAndBranch * instr)2651 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2652 LHasCachedArrayIndexAndBranch* instr) {
2653 Register input = ToRegister(instr->value());
2654
2655 __ test(FieldOperand(input, String::kHashFieldOffset),
2656 Immediate(String::kContainsCachedArrayIndexMask));
2657 EmitBranch(instr, equal);
2658 }
2659
2660
2661 // Branches to a label or falls through with the answer in the z flag. Trashes
2662 // the temp registers, but not the input.
EmitClassOfTest(Label * is_true,Label * is_false,Handle<String> class_name,Register input,Register temp,Register temp2)2663 void LCodeGen::EmitClassOfTest(Label* is_true,
2664 Label* is_false,
2665                                Handle<String> class_name,
2666 Register input,
2667 Register temp,
2668 Register temp2) {
2669 ASSERT(!input.is(temp));
2670 ASSERT(!input.is(temp2));
2671 ASSERT(!temp.is(temp2));
2672 __ JumpIfSmi(input, is_false);
2673
2674 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2675 // Assuming the following assertions, we can use the same compares to test
2676 // for both being a function type and being in the object type range.
2677 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2678 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2679 FIRST_SPEC_OBJECT_TYPE + 1);
2680 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2681 LAST_SPEC_OBJECT_TYPE - 1);
2682 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2683 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2684 __ j(below, is_false);
2685 __ j(equal, is_true);
2686 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2687 __ j(equal, is_true);
2688 } else {
2689 // Faster code path to avoid two compares: subtract lower bound from the
2690 // actual type and do a signed compare with the width of the type range.
2691 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2692 __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2693 __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2694 __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2695 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2696 __ j(above, is_false);
2697 }
2698
2699 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2700 // Check if the constructor in the map is a function.
2701 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2702 // Objects with a non-function constructor have class 'Object'.
2703 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2704 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2705 __ j(not_equal, is_true);
2706 } else {
2707 __ j(not_equal, is_false);
2708 }
2709
2710 // temp now contains the constructor function. Grab the
2711 // instance class name from there.
2712 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2713 __ mov(temp, FieldOperand(temp,
2714 SharedFunctionInfo::kInstanceClassNameOffset));
2715 // The class name we are testing against is internalized since it's a literal.
2716 // The name in the constructor is internalized because of the way the context
2717 // is booted. This routine isn't expected to work for random API-created
2718 // classes and it doesn't have to because you can't access it with natives
2719 // syntax. Since both sides are internalized it is sufficient to use an
2720 // identity comparison.
2721 __ cmp(temp, class_name);
2722 // End with the answer in the z flag.
2723 }
2724
2725
DoClassOfTestAndBranch(LClassOfTestAndBranch * instr)2726 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2727 Register input = ToRegister(instr->value());
2728 Register temp = ToRegister(instr->temp());
2729 Register temp2 = ToRegister(instr->temp2());
2730
2731 Handle<String> class_name = instr->hydrogen()->class_name();
2732
2733 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2734 class_name, input, temp, temp2);
2735
2736 EmitBranch(instr, equal);
2737 }
2738
2739
DoCmpMapAndBranch(LCmpMapAndBranch * instr)2740 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2741 Register reg = ToRegister(instr->value());
2742 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2743 EmitBranch(instr, equal);
2744 }
2745
2746
DoInstanceOf(LInstanceOf * instr)2747 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2748 // Object and function are in fixed registers defined by the stub.
2749 ASSERT(ToRegister(instr->context()).is(esi));
2750 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2751 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2752
2753 Label true_value, done;
2754 __ test(eax, Operand(eax));
2755 __ j(zero, &true_value, Label::kNear);
2756 __ mov(ToRegister(instr->result()), factory()->false_value());
2757 __ jmp(&done, Label::kNear);
2758 __ bind(&true_value);
2759 __ mov(ToRegister(instr->result()), factory()->true_value());
2760 __ bind(&done);
2761 }
2762
2763
DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal * instr)2764 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2765 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2766 public:
2767 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2768 LInstanceOfKnownGlobal* instr,
2769 const X87Stack& x87_stack)
2770 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2771 virtual void Generate() V8_OVERRIDE {
2772 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2773 }
2774 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2775 Label* map_check() { return &map_check_; }
2776 private:
2777 LInstanceOfKnownGlobal* instr_;
2778 Label map_check_;
2779 };
2780
2781 DeferredInstanceOfKnownGlobal* deferred;
2782 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2783
2784 Label done, false_result;
2785 Register object = ToRegister(instr->value());
2786 Register temp = ToRegister(instr->temp());
2787
2788 // A Smi is not an instance of anything.
2789 __ JumpIfSmi(object, &false_result, Label::kNear);
2790
2791   // This is the inlined call site instanceof cache. The two occurrences of the
2792 // hole value will be patched to the last map/result pair generated by the
2793 // instanceof stub.
2794 Label cache_miss;
2795 Register map = ToRegister(instr->temp());
2796 __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2797 __ bind(deferred->map_check()); // Label for calculating code patching.
2798 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2799 __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
2800 __ j(not_equal, &cache_miss, Label::kNear);
2801 __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2802 __ jmp(&done, Label::kNear);
2803
2804 // The inlined call site cache did not match. Check for null and string
2805 // before calling the deferred code.
2806 __ bind(&cache_miss);
2807 // Null is not an instance of anything.
2808 __ cmp(object, factory()->null_value());
2809 __ j(equal, &false_result, Label::kNear);
2810
2811 // String values are not instances of anything.
2812 Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2813 __ j(is_string, &false_result, Label::kNear);
2814
2815 // Go to the deferred code.
2816 __ jmp(deferred->entry());
2817
2818 __ bind(&false_result);
2819 __ mov(ToRegister(instr->result()), factory()->false_value());
2820
2821 // Here result has either true or false. Deferred code also produces true or
2822 // false object.
2823 __ bind(deferred->exit());
2824 __ bind(&done);
2825 }
2826
2827
DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal * instr,Label * map_check)2828 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2829 Label* map_check) {
2830 PushSafepointRegistersScope scope(this);
2831
2832 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2833 flags = static_cast<InstanceofStub::Flags>(
2834 flags | InstanceofStub::kArgsInRegisters);
2835 flags = static_cast<InstanceofStub::Flags>(
2836 flags | InstanceofStub::kCallSiteInlineCheck);
2837 flags = static_cast<InstanceofStub::Flags>(
2838 flags | InstanceofStub::kReturnTrueFalseObject);
2839 InstanceofStub stub(isolate(), flags);
2840
2841 // Get the temp register reserved by the instruction. This needs to be a
2842   // register which is pushed last by PushSafepointRegisters, as the top of
2843   // the stack is used to pass the offset of the map check location to
2844   // the stub.
2845 Register temp = ToRegister(instr->temp());
2846 ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
2847 __ LoadHeapObject(InstanceofStub::right(), instr->function());
2848 static const int kAdditionalDelta = 13;
2849 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2850 __ mov(temp, Immediate(delta));
2851 __ StoreToSafepointRegisterSlot(temp, temp);
2852 CallCodeGeneric(stub.GetCode(),
2853 RelocInfo::CODE_TARGET,
2854 instr,
2855 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2856 // Get the deoptimization index of the LLazyBailout-environment that
2857 // corresponds to this instruction.
2858 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2859 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2860
2861 // Put the result value into the eax slot and restore all registers.
2862 __ StoreToSafepointRegisterSlot(eax, eax);
2863 }
2864
2865
DoCmpT(LCmpT * instr)2866 void LCodeGen::DoCmpT(LCmpT* instr) {
2867 Token::Value op = instr->op();
2868
2869 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2870 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2871
2872 Condition condition = ComputeCompareCondition(op);
2873 Label true_value, done;
2874 __ test(eax, Operand(eax));
2875 __ j(condition, &true_value, Label::kNear);
2876 __ mov(ToRegister(instr->result()), factory()->false_value());
2877 __ jmp(&done, Label::kNear);
2878 __ bind(&true_value);
2879 __ mov(ToRegister(instr->result()), factory()->true_value());
2880 __ bind(&done);
2881 }
2882
2883
EmitReturn(LReturn * instr,bool dynamic_frame_alignment)2884 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
2885 int extra_value_count = dynamic_frame_alignment ? 2 : 1;
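  // On top of the parameters there is always the return address, plus one
  // extra padding word when the frame was dynamically aligned; both must be
  // popped on return.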
2886
2887 if (instr->has_constant_parameter_count()) {
2888 int parameter_count = ToInteger32(instr->constant_parameter_count());
2889 if (dynamic_frame_alignment && FLAG_debug_code) {
2890 __ cmp(Operand(esp,
2891 (parameter_count + extra_value_count) * kPointerSize),
2892 Immediate(kAlignmentZapValue));
2893 __ Assert(equal, kExpectedAlignmentMarker);
2894 }
2895 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
2896 } else {
2897 Register reg = ToRegister(instr->parameter_count());
2898     // The argument count parameter is a smi.
2899 __ SmiUntag(reg);
2900 Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
2901 if (dynamic_frame_alignment && FLAG_debug_code) {
2902 ASSERT(extra_value_count == 2);
2903 __ cmp(Operand(esp, reg, times_pointer_size,
2904 extra_value_count * kPointerSize),
2905 Immediate(kAlignmentZapValue));
2906 __ Assert(equal, kExpectedAlignmentMarker);
2907 }
2908
2909     // Emit code to restore the stack based on instr->parameter_count().
2910 __ pop(return_addr_reg); // save return address
2911 if (dynamic_frame_alignment) {
2912 __ inc(reg); // 1 more for alignment
2913 }
2914 __ shl(reg, kPointerSizeLog2);
2915 __ add(esp, reg);
2916 __ jmp(return_addr_reg);
2917 }
2918 }
2919
2920
DoReturn(LReturn * instr)2921 void LCodeGen::DoReturn(LReturn* instr) {
2922 if (FLAG_trace && info()->IsOptimizing()) {
2923 // Preserve the return value on the stack and rely on the runtime call
2924 // to return the value in the same register. We're leaving the code
2925     // managed by the register allocator and tearing down the frame, so it's
2926 // safe to write to the context register.
2927 __ push(eax);
2928 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2929 __ CallRuntime(Runtime::kTraceExit, 1);
2930 }
2931 if (dynamic_frame_alignment_) {
2932 // Fetch the state of the dynamic frame alignment.
2933 __ mov(edx, Operand(ebp,
2934 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2935 }
2936 int no_frame_start = -1;
2937 if (NeedsEagerFrame()) {
2938 __ mov(esp, ebp);
2939 __ pop(ebp);
2940 no_frame_start = masm_->pc_offset();
2941 }
2942 if (dynamic_frame_alignment_) {
2943 Label no_padding;
2944 __ cmp(edx, Immediate(kNoAlignmentPadding));
2945 __ j(equal, &no_padding, Label::kNear);
2946
2947 EmitReturn(instr, true);
2948 __ bind(&no_padding);
2949 }
2950
2951 EmitReturn(instr, false);
2952 if (no_frame_start != -1) {
2953 info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2954 }
2955 }
2956
2957
DoLoadGlobalCell(LLoadGlobalCell * instr)2958 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2959 Register result = ToRegister(instr->result());
2960 __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
2961 if (instr->hydrogen()->RequiresHoleCheck()) {
2962 __ cmp(result, factory()->the_hole_value());
2963 DeoptimizeIf(equal, instr->environment());
2964 }
2965 }
2966
2967
DoLoadGlobalGeneric(LLoadGlobalGeneric * instr)2968 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2969 ASSERT(ToRegister(instr->context()).is(esi));
2970 ASSERT(ToRegister(instr->global_object()).is(edx));
2971 ASSERT(ToRegister(instr->result()).is(eax));
2972
2973 __ mov(ecx, instr->name());
2974 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2975 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2976 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2977 }
2978
2979
DoStoreGlobalCell(LStoreGlobalCell * instr)2980 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2981 Register value = ToRegister(instr->value());
2982 Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
2983
2984   // If the cell we are storing to contains the hole, it could have
2985 // been deleted from the property dictionary. In that case, we need
2986 // to update the property details in the property dictionary to mark
2987 // it as no longer deleted. We deoptimize in that case.
2988 if (instr->hydrogen()->RequiresHoleCheck()) {
2989 __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
2990 DeoptimizeIf(equal, instr->environment());
2991 }
2992
2993 // Store the value.
2994 __ mov(Operand::ForCell(cell_handle), value);
2995 // Cells are always rescanned, so no write barrier here.
2996 }
2997
2998
DoLoadContextSlot(LLoadContextSlot * instr)2999 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3000 Register context = ToRegister(instr->context());
3001 Register result = ToRegister(instr->result());
3002 __ mov(result, ContextOperand(context, instr->slot_index()));
3003
3004 if (instr->hydrogen()->RequiresHoleCheck()) {
3005 __ cmp(result, factory()->the_hole_value());
3006 if (instr->hydrogen()->DeoptimizesOnHole()) {
3007 DeoptimizeIf(equal, instr->environment());
3008 } else {
3009 Label is_not_hole;
3010 __ j(not_equal, &is_not_hole, Label::kNear);
3011 __ mov(result, factory()->undefined_value());
3012 __ bind(&is_not_hole);
3013 }
3014 }
3015 }
3016
3017
DoStoreContextSlot(LStoreContextSlot * instr)3018 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3019 Register context = ToRegister(instr->context());
3020 Register value = ToRegister(instr->value());
3021
3022 Label skip_assignment;
3023
3024 Operand target = ContextOperand(context, instr->slot_index());
3025 if (instr->hydrogen()->RequiresHoleCheck()) {
3026 __ cmp(target, factory()->the_hole_value());
3027 if (instr->hydrogen()->DeoptimizesOnHole()) {
3028 DeoptimizeIf(equal, instr->environment());
3029 } else {
3030 __ j(not_equal, &skip_assignment, Label::kNear);
3031 }
3032 }
3033
3034 __ mov(target, value);
3035 if (instr->hydrogen()->NeedsWriteBarrier()) {
3036 SmiCheck check_needed =
3037 instr->hydrogen()->value()->type().IsHeapObject()
3038 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3039 Register temp = ToRegister(instr->temp());
3040 int offset = Context::SlotOffset(instr->slot_index());
3041 __ RecordWriteContextSlot(context,
3042 offset,
3043 value,
3044 temp,
3045 EMIT_REMEMBERED_SET,
3046 check_needed);
3047 }
3048
3049 __ bind(&skip_assignment);
3050 }
3051
3052
DoLoadNamedField(LLoadNamedField * instr)3053 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3054 HObjectAccess access = instr->hydrogen()->access();
3055 int offset = access.offset();
3056
3057 if (access.IsExternalMemory()) {
3058 Register result = ToRegister(instr->result());
3059 MemOperand operand = instr->object()->IsConstantOperand()
3060 ? MemOperand::StaticVariable(ToExternalReference(
3061 LConstantOperand::cast(instr->object())))
3062 : MemOperand(ToRegister(instr->object()), offset);
3063 __ Load(result, operand, access.representation());
3064 return;
3065 }
3066
3067 Register object = ToRegister(instr->object());
3068 if (instr->hydrogen()->representation().IsDouble()) {
3069 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3070 return;
3071 }
3072
3073 Register result = ToRegister(instr->result());
3074 if (!access.IsInobject()) {
3075 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3076 object = result;
3077 }
3078 __ Load(result, FieldOperand(object, offset), access.representation());
3079 }
3080
3081
EmitPushTaggedOperand(LOperand * operand)3082 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3083 ASSERT(!operand->IsDoubleRegister());
3084 if (operand->IsConstantOperand()) {
3085 Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3086 AllowDeferredHandleDereference smi_check;
3087 if (object->IsSmi()) {
3088 __ Push(Handle<Smi>::cast(object));
3089 } else {
3090 __ PushHeapObject(Handle<HeapObject>::cast(object));
3091 }
3092 } else if (operand->IsRegister()) {
3093 __ push(ToRegister(operand));
3094 } else {
3095 __ push(ToOperand(operand));
3096 }
3097 }
3098
3099
DoLoadNamedGeneric(LLoadNamedGeneric * instr)3100 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3101 ASSERT(ToRegister(instr->context()).is(esi));
3102 ASSERT(ToRegister(instr->object()).is(edx));
3103 ASSERT(ToRegister(instr->result()).is(eax));
3104
3105 __ mov(ecx, instr->name());
3106 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3107 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3108 }
3109
3110
DoLoadFunctionPrototype(LLoadFunctionPrototype * instr)3111 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3112 Register function = ToRegister(instr->function());
3113 Register temp = ToRegister(instr->temp());
3114 Register result = ToRegister(instr->result());
3115
3116 // Check that the function really is a function.
3117 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3118 DeoptimizeIf(not_equal, instr->environment());
3119
3120 // Check whether the function has an instance prototype.
3121 Label non_instance;
3122 __ test_b(FieldOperand(result, Map::kBitFieldOffset),
3123 1 << Map::kHasNonInstancePrototype);
3124 __ j(not_zero, &non_instance, Label::kNear);
3125
3126 // Get the prototype or initial map from the function.
3127 __ mov(result,
3128 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3129
3130 // Check that the function has a prototype or an initial map.
3131 __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3132 DeoptimizeIf(equal, instr->environment());
3133
3134 // If the function does not have an initial map, we're done.
3135 Label done;
3136 __ CmpObjectType(result, MAP_TYPE, temp);
3137 __ j(not_equal, &done, Label::kNear);
3138
3139 // Get the prototype from the initial map.
3140 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3141 __ jmp(&done, Label::kNear);
3142
3143 // Non-instance prototype: Fetch prototype from constructor field
3144 // in the function's map.
3145 __ bind(&non_instance);
3146 __ mov(result, FieldOperand(result, Map::kConstructorOffset));
3147
3148 // All done.
3149 __ bind(&done);
3150 }
3151
3152
DoLoadRoot(LLoadRoot * instr)3153 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3154 Register result = ToRegister(instr->result());
3155 __ LoadRoot(result, instr->index());
3156 }
3157
3158
DoAccessArgumentsAt(LAccessArgumentsAt * instr)3159 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3160 Register arguments = ToRegister(instr->arguments());
3161 Register result = ToRegister(instr->result());
3162 if (instr->length()->IsConstantOperand() &&
3163 instr->index()->IsConstantOperand()) {
3164 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3165 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
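    // Mirrors the register case below: (const_length - const_index) accounts
    // for one of the two words between the frame pointer and the last
    // argument, and the + 1 accounts for the other.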
3166 int index = (const_length - const_index) + 1;
3167 __ mov(result, Operand(arguments, index * kPointerSize));
3168 } else {
3169 Register length = ToRegister(instr->length());
3170 Operand index = ToOperand(instr->index());
3171 // There are two words between the frame pointer and the last argument.
3172     // Subtracting from length accounts for one of them; add one more.
3173 __ sub(length, index);
3174 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3175 }
3176 }
3177
3178
DoLoadKeyedExternalArray(LLoadKeyed * instr)3179 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3180 ElementsKind elements_kind = instr->elements_kind();
3181 LOperand* key = instr->key();
3182 if (!key->IsConstantOperand() &&
3183 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3184 elements_kind)) {
3185 __ SmiUntag(ToRegister(key));
3186 }
3187 Operand operand(BuildFastArrayOperand(
3188 instr->elements(),
3189 key,
3190 instr->hydrogen()->key()->representation(),
3191 elements_kind,
3192 instr->base_offset()));
3193 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3194 elements_kind == FLOAT32_ELEMENTS) {
3195 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3196 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3197 elements_kind == FLOAT64_ELEMENTS) {
3198 X87Mov(ToX87Register(instr->result()), operand);
3199 } else {
3200 Register result(ToRegister(instr->result()));
3201 switch (elements_kind) {
3202 case EXTERNAL_INT8_ELEMENTS:
3203 case INT8_ELEMENTS:
3204 __ movsx_b(result, operand);
3205 break;
3206 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3207 case EXTERNAL_UINT8_ELEMENTS:
3208 case UINT8_ELEMENTS:
3209 case UINT8_CLAMPED_ELEMENTS:
3210 __ movzx_b(result, operand);
3211 break;
3212 case EXTERNAL_INT16_ELEMENTS:
3213 case INT16_ELEMENTS:
3214 __ movsx_w(result, operand);
3215 break;
3216 case EXTERNAL_UINT16_ELEMENTS:
3217 case UINT16_ELEMENTS:
3218 __ movzx_w(result, operand);
3219 break;
3220 case EXTERNAL_INT32_ELEMENTS:
3221 case INT32_ELEMENTS:
3222 __ mov(result, operand);
3223 break;
3224 case EXTERNAL_UINT32_ELEMENTS:
3225 case UINT32_ELEMENTS:
3226 __ mov(result, operand);
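        // An unsigned value with the sign bit set has no int32
        // representation, so unless all uses treat the value as uint32 we
        // must deoptimize.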
3227 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3228 __ test(result, Operand(result));
3229 DeoptimizeIf(negative, instr->environment());
3230 }
3231 break;
3232 case EXTERNAL_FLOAT32_ELEMENTS:
3233 case EXTERNAL_FLOAT64_ELEMENTS:
3234 case FLOAT32_ELEMENTS:
3235 case FLOAT64_ELEMENTS:
3236 case FAST_SMI_ELEMENTS:
3237 case FAST_ELEMENTS:
3238 case FAST_DOUBLE_ELEMENTS:
3239 case FAST_HOLEY_SMI_ELEMENTS:
3240 case FAST_HOLEY_ELEMENTS:
3241 case FAST_HOLEY_DOUBLE_ELEMENTS:
3242 case DICTIONARY_ELEMENTS:
3243 case SLOPPY_ARGUMENTS_ELEMENTS:
3244 UNREACHABLE();
3245 break;
3246 }
3247 }
3248 }
3249
3250
DoLoadKeyedFixedDoubleArray(LLoadKeyed * instr)3251 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3252 if (instr->hydrogen()->RequiresHoleCheck()) {
3253 Operand hole_check_operand = BuildFastArrayOperand(
3254 instr->elements(), instr->key(),
3255 instr->hydrogen()->key()->representation(),
3256 FAST_DOUBLE_ELEMENTS,
3257 instr->base_offset() + sizeof(kHoleNanLower32));
3258 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3259 DeoptimizeIf(equal, instr->environment());
3260 }
3261
3262 Operand double_load_operand = BuildFastArrayOperand(
3263 instr->elements(),
3264 instr->key(),
3265 instr->hydrogen()->key()->representation(),
3266 FAST_DOUBLE_ELEMENTS,
3267 instr->base_offset());
3268 X87Mov(ToX87Register(instr->result()), double_load_operand);
3269 }
3270
3271
3272 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3273 Register result = ToRegister(instr->result());
3274
3275 // Load the result.
3276 __ mov(result,
3277 BuildFastArrayOperand(instr->elements(),
3278 instr->key(),
3279 instr->hydrogen()->key()->representation(),
3280 FAST_ELEMENTS,
3281 instr->base_offset()));
3282
3283 // Check for the hole value.
3284 if (instr->hydrogen()->RequiresHoleCheck()) {
3285 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3286 __ test(result, Immediate(kSmiTagMask));
3287 DeoptimizeIf(not_equal, instr->environment());
3288 } else {
3289 __ cmp(result, factory()->the_hole_value());
3290 DeoptimizeIf(equal, instr->environment());
3291 }
3292 }
3293 }
3294
3295
3296 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3297 if (instr->is_typed_elements()) {
3298 DoLoadKeyedExternalArray(instr);
3299 } else if (instr->hydrogen()->representation().IsDouble()) {
3300 DoLoadKeyedFixedDoubleArray(instr);
3301 } else {
3302 DoLoadKeyedFixedArray(instr);
3303 }
3304 }
3305
3306
3307 Operand LCodeGen::BuildFastArrayOperand(
3308 LOperand* elements_pointer,
3309 LOperand* key,
3310 Representation key_representation,
3311 ElementsKind elements_kind,
3312 uint32_t base_offset) {
3313 Register elements_pointer_reg = ToRegister(elements_pointer);
3314 int element_shift_size = ElementsKindToShiftSize(elements_kind);
3315 int shift_size = element_shift_size;
3316 if (key->IsConstantOperand()) {
3317 int constant_value = ToInteger32(LConstantOperand::cast(key));
3318 if (constant_value & 0xF0000000) {
3319 Abort(kArrayIndexConstantValueTooBig);
3320 }
3321 return Operand(elements_pointer_reg,
3322 ((constant_value) << shift_size)
3323 + base_offset);
3324 } else {
3325 // Take the tag bit into account while computing the shift size.
3326 if (key_representation.IsSmi() && (shift_size >= 1)) {
3327 shift_size -= kSmiTagSize;
3328 }
3329 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3330 return Operand(elements_pointer_reg,
3331 ToRegister(key),
3332 scale_factor,
3333 base_offset);
3334 }
3335 }
3336
3337
3338 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3339 ASSERT(ToRegister(instr->context()).is(esi));
3340 ASSERT(ToRegister(instr->object()).is(edx));
3341 ASSERT(ToRegister(instr->key()).is(ecx));
3342
3343 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3344 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3345 }
3346
3347
3348 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3349 Register result = ToRegister(instr->result());
3350
3351 if (instr->hydrogen()->from_inlined()) {
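    // For an inlined frame the arguments were just pushed; pointing two words
    // below esp makes the usual "frame pointer + two words" access pattern
    // land on them.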
3352 __ lea(result, Operand(esp, -2 * kPointerSize));
3353 } else {
3354     // Check for arguments adaptor frame.
3355 Label done, adapted;
3356 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3357 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3358 __ cmp(Operand(result),
3359 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3360 __ j(equal, &adapted, Label::kNear);
3361
3362 // No arguments adaptor frame.
3363 __ mov(result, Operand(ebp));
3364 __ jmp(&done, Label::kNear);
3365
3366 // Arguments adaptor frame present.
3367 __ bind(&adapted);
3368 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3369
3370     // Result is the frame pointer of this frame if not adapted, and of the
3371     // real frame below the adaptor frame if adapted.
3372 __ bind(&done);
3373 }
3374 }
3375
3376
3377 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3378 Operand elem = ToOperand(instr->elements());
3379 Register result = ToRegister(instr->result());
3380
3381 Label done;
3382
3383   // If there is no arguments adaptor frame, the number of arguments is fixed.
3384 __ cmp(ebp, elem);
3385 __ mov(result, Immediate(scope()->num_parameters()));
3386 __ j(equal, &done, Label::kNear);
3387
3388 // Arguments adaptor frame present. Get argument length from there.
3389 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3390 __ mov(result, Operand(result,
3391 ArgumentsAdaptorFrameConstants::kLengthOffset));
3392 __ SmiUntag(result);
3393
3394 // Argument length is in result register.
3395 __ bind(&done);
3396 }
3397
3398
3399 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3400 Register receiver = ToRegister(instr->receiver());
3401 Register function = ToRegister(instr->function());
3402
3403 // If the receiver is null or undefined, we have to pass the global
3404 // object as a receiver to normal functions. Values have to be
3405 // passed unchanged to builtins and strict-mode functions.
3406 Label receiver_ok, global_object;
3407 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
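  // With --deopt-every-n-times, DeoptimizeIf emits extra code, so the jumps
  // to receiver_ok may not fit in a near branch.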
3408 Register scratch = ToRegister(instr->temp());
3409
3410 if (!instr->hydrogen()->known_function()) {
3411 // Do not transform the receiver to object for strict mode
3412 // functions.
3413 __ mov(scratch,
3414 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3415 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3416 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3417 __ j(not_equal, &receiver_ok, dist);
3418
3419 // Do not transform the receiver to object for builtins.
3420 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3421 1 << SharedFunctionInfo::kNativeBitWithinByte);
3422 __ j(not_equal, &receiver_ok, dist);
3423 }
3424
3425 // Normal function. Replace undefined or null with global receiver.
3426 __ cmp(receiver, factory()->null_value());
3427 __ j(equal, &global_object, Label::kNear);
3428 __ cmp(receiver, factory()->undefined_value());
3429 __ j(equal, &global_object, Label::kNear);
3430
3431 // The receiver should be a JS object.
3432 __ test(receiver, Immediate(kSmiTagMask));
3433 DeoptimizeIf(equal, instr->environment());
3434 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3435 DeoptimizeIf(below, instr->environment());
3436
3437 __ jmp(&receiver_ok, Label::kNear);
3438 __ bind(&global_object);
3439 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3440 const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3441 __ mov(receiver, Operand(receiver, global_offset));
3442 const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
3443 __ mov(receiver, FieldOperand(receiver, receiver_offset));
3444 __ bind(&receiver_ok);
3445 }
3446
3447
3448 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3449 Register receiver = ToRegister(instr->receiver());
3450 Register function = ToRegister(instr->function());
3451 Register length = ToRegister(instr->length());
3452 Register elements = ToRegister(instr->elements());
3453 ASSERT(receiver.is(eax)); // Used for parameter count.
3454 ASSERT(function.is(edi)); // Required by InvokeFunction.
3455 ASSERT(ToRegister(instr->result()).is(eax));
3456
3457 // Copy the arguments to this function possibly from the
3458 // adaptor frame below it.
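  // Argument counts above kArgumentsLimit are not copied inline; deoptimize
  // and let unoptimized code handle such calls.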
3459 const uint32_t kArgumentsLimit = 1 * KB;
3460 __ cmp(length, kArgumentsLimit);
3461 DeoptimizeIf(above, instr->environment());
3462
3463 __ push(receiver);
3464 __ mov(receiver, length);
3465
3466 // Loop through the arguments pushing them onto the execution
3467 // stack.
3468 Label invoke, loop;
3469 // length is a small non-negative integer, due to the test above.
3470 __ test(length, Operand(length));
3471 __ j(zero, &invoke, Label::kNear);
3472 __ bind(&loop);
3473 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3474 __ dec(length);
3475 __ j(not_zero, &loop);
3476
3477 // Invoke the function.
3478 __ bind(&invoke);
3479 ASSERT(instr->HasPointerMap());
3480 LPointerMap* pointers = instr->pointer_map();
3481 SafepointGenerator safepoint_generator(
3482 this, pointers, Safepoint::kLazyDeopt);
3483 ParameterCount actual(eax);
3484 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3485 }
3486
3487
3488 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3489 __ int3();
3490 }
3491
3492
3493 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3494 LOperand* argument = instr->value();
3495 EmitPushTaggedOperand(argument);
3496 }
3497
3498
3499 void LCodeGen::DoDrop(LDrop* instr) {
3500 __ Drop(instr->count());
3501 }
3502
3503
3504 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3505 Register result = ToRegister(instr->result());
3506 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3507 }
3508
3509
3510 void LCodeGen::DoContext(LContext* instr) {
3511 Register result = ToRegister(instr->result());
3512 if (info()->IsOptimizing()) {
3513 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3514 } else {
3515 // If there is no frame, the context must be in esi.
3516 ASSERT(result.is(esi));
3517 }
3518 }
3519
3520
3521 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3522 ASSERT(ToRegister(instr->context()).is(esi));
3523 __ push(esi); // The context is the first argument.
3524 __ push(Immediate(instr->hydrogen()->pairs()));
3525 __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3526 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3527 }
3528
3529
3530 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3531 int formal_parameter_count,
3532 int arity,
3533 LInstruction* instr,
3534 EDIState edi_state) {
3535 bool dont_adapt_arguments =
3536 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3537 bool can_invoke_directly =
3538 dont_adapt_arguments || formal_parameter_count == arity;
3539
3540 if (can_invoke_directly) {
3541 if (edi_state == EDI_UNINITIALIZED) {
3542 __ LoadHeapObject(edi, function);
3543 }
3544
3545 // Change context.
3546 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3547
3548     // Set eax to arguments count if adaptation is not needed. Assumes that eax
3549 // is available to write to at this point.
3550 if (dont_adapt_arguments) {
3551 __ mov(eax, arity);
3552 }
3553
3554 // Invoke function directly.
3555 if (function.is_identical_to(info()->closure())) {
3556 __ CallSelf();
3557 } else {
3558 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3559 }
3560 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3561 } else {
3562 // We need to adapt arguments.
3563 LPointerMap* pointers = instr->pointer_map();
3564 SafepointGenerator generator(
3565 this, pointers, Safepoint::kLazyDeopt);
3566 ParameterCount count(arity);
3567 ParameterCount expected(formal_parameter_count);
3568 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3569 }
3570 }
3571
3572
3573 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3574 ASSERT(ToRegister(instr->result()).is(eax));
3575
3576 LPointerMap* pointers = instr->pointer_map();
3577 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3578
3579 if (instr->target()->IsConstantOperand()) {
3580 LConstantOperand* target = LConstantOperand::cast(instr->target());
3581 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3582 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3583 __ call(code, RelocInfo::CODE_TARGET);
3584 } else {
3585 ASSERT(instr->target()->IsRegister());
3586 Register target = ToRegister(instr->target());
3587 generator.BeforeCall(__ CallSize(Operand(target)));
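    // Skip the Code object header (and remove the heap object tag) to reach
    // the first instruction before calling.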
3588 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3589 __ call(target);
3590 }
3591 generator.AfterCall();
3592 }
3593
3594
3595 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3596 ASSERT(ToRegister(instr->function()).is(edi));
3597 ASSERT(ToRegister(instr->result()).is(eax));
3598
3599 if (instr->hydrogen()->pass_argument_count()) {
3600 __ mov(eax, instr->arity());
3601 }
3602
3603 // Change context.
3604 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3605
3606 bool is_self_call = false;
3607 if (instr->hydrogen()->function()->IsConstant()) {
3608 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3609 Handle<JSFunction> jsfun =
3610 Handle<JSFunction>::cast(fun_const->handle(isolate()));
3611 is_self_call = jsfun.is_identical_to(info()->closure());
3612 }
3613
3614 if (is_self_call) {
3615 __ CallSelf();
3616 } else {
3617 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3618 }
3619
3620 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3621 }
3622
3623
3624 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3625 Register input_reg = ToRegister(instr->value());
3626 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3627 factory()->heap_number_map());
3628 DeoptimizeIf(not_equal, instr->environment());
3629
3630 Label slow, allocated, done;
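  // Pick two scratch registers that differ from each other and from
  // input_reg.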
3631 Register tmp = input_reg.is(eax) ? ecx : eax;
3632 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3633
3634 // Preserve the value of all registers.
3635 PushSafepointRegistersScope scope(this);
3636
3637 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3638 // Check the sign of the argument. If the argument is positive, just
3639 // return it. We do not need to patch the stack since |input| and
3640 // |result| are the same register and |input| will be restored
3641 // unchanged by popping safepoint registers.
3642 __ test(tmp, Immediate(HeapNumber::kSignMask));
3643 __ j(zero, &done, Label::kNear);
3644
3645 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3646 __ jmp(&allocated, Label::kNear);
3647
3648 // Slow case: Call the runtime system to do the number allocation.
3649 __ bind(&slow);
3650 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
3651 instr, instr->context());
3652 // Set the pointer to the new heap number in tmp.
3653 if (!tmp.is(eax)) __ mov(tmp, eax);
3654 // Restore input_reg after call to runtime.
3655 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3656
3657 __ bind(&allocated);
3658 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3659 __ and_(tmp2, ~HeapNumber::kSignMask);
3660 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3661 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3662 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3663 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3664
3665 __ bind(&done);
3666 }
3667
3668
3669 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3670 Register input_reg = ToRegister(instr->value());
3671 __ test(input_reg, Operand(input_reg));
3672 Label is_positive;
3673 __ j(not_sign, &is_positive, Label::kNear);
3674 __ neg(input_reg); // Sets flags.
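  // Negating kMinInt overflows and leaves the result negative, which the
  // deopt below catches.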
3675 DeoptimizeIf(negative, instr->environment());
3676 __ bind(&is_positive);
3677 }
3678
3679
3680 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3681 // Class for deferred case.
3682 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3683 public:
3684 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3685 LMathAbs* instr,
3686 const X87Stack& x87_stack)
3687 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3688 virtual void Generate() V8_OVERRIDE {
3689 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3690 }
3691 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3692 private:
3693 LMathAbs* instr_;
3694 };
3695
3696 ASSERT(instr->value()->Equals(instr->result()));
3697 Representation r = instr->hydrogen()->value()->representation();
3698
3699 if (r.IsDouble()) {
3700 UNIMPLEMENTED();
3701 } else if (r.IsSmiOrInteger32()) {
3702 EmitIntegerMathAbs(instr);
3703 } else { // Tagged case.
3704 DeferredMathAbsTaggedHeapNumber* deferred =
3705 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3706 Register input_reg = ToRegister(instr->value());
3707 // Smi check.
3708 __ JumpIfNotSmi(input_reg, deferred->entry());
3709 EmitIntegerMathAbs(instr);
3710 __ bind(deferred->exit());
3711 }
3712 }
3713
3714
3715 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3716 UNIMPLEMENTED();
3717 }
3718
3719
3720 void LCodeGen::DoMathRound(LMathRound* instr) {
3721 UNIMPLEMENTED();
3722 }
3723
3724
3725 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3726 UNIMPLEMENTED();
3727 }
3728
3729
3730 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3731 UNIMPLEMENTED();
3732 }
3733
3734
3735 void LCodeGen::DoPower(LPower* instr) {
3736 UNIMPLEMENTED();
3737 }
3738
3739
3740 void LCodeGen::DoMathLog(LMathLog* instr) {
3741 UNIMPLEMENTED();
3742 }
3743
3744
3745 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3746 UNIMPLEMENTED();
3747 }
3748
3749
3750 void LCodeGen::DoMathExp(LMathExp* instr) {
3751 UNIMPLEMENTED();
3752 }
3753
3754
3755 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3756 ASSERT(ToRegister(instr->context()).is(esi));
3757 ASSERT(ToRegister(instr->function()).is(edi));
3758 ASSERT(instr->HasPointerMap());
3759
3760 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3761 if (known_function.is_null()) {
3762 LPointerMap* pointers = instr->pointer_map();
3763 SafepointGenerator generator(
3764 this, pointers, Safepoint::kLazyDeopt);
3765 ParameterCount count(instr->arity());
3766 __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
3767 } else {
3768 CallKnownFunction(known_function,
3769 instr->hydrogen()->formal_parameter_count(),
3770 instr->arity(),
3771 instr,
3772 EDI_CONTAINS_TARGET);
3773 }
3774 }
3775
3776
3777 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3778 ASSERT(ToRegister(instr->context()).is(esi));
3779 ASSERT(ToRegister(instr->function()).is(edi));
3780 ASSERT(ToRegister(instr->result()).is(eax));
3781
3782 int arity = instr->arity();
3783 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
3784 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3785 }
3786
3787
3788 void LCodeGen::DoCallNew(LCallNew* instr) {
3789 ASSERT(ToRegister(instr->context()).is(esi));
3790 ASSERT(ToRegister(instr->constructor()).is(edi));
3791 ASSERT(ToRegister(instr->result()).is(eax));
3792
3793   // No cell in ebx for construct type feedback in optimized code.
3794 __ mov(ebx, isolate()->factory()->undefined_value());
3795 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
3796 __ Move(eax, Immediate(instr->arity()));
3797 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3798 }
3799
3800
3801 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3802 ASSERT(ToRegister(instr->context()).is(esi));
3803 ASSERT(ToRegister(instr->constructor()).is(edi));
3804 ASSERT(ToRegister(instr->result()).is(eax));
3805
3806 __ Move(eax, Immediate(instr->arity()));
3807 __ mov(ebx, isolate()->factory()->undefined_value());
3808 ElementsKind kind = instr->hydrogen()->elements_kind();
3809 AllocationSiteOverrideMode override_mode =
3810 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3811 ? DISABLE_ALLOCATION_SITES
3812 : DONT_OVERRIDE;
3813
3814 if (instr->arity() == 0) {
3815 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3816 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3817 } else if (instr->arity() == 1) {
3818 Label done;
3819 if (IsFastPackedElementsKind(kind)) {
3820 Label packed_case;
3821       // A single nonzero argument is interpreted as a length and forces a
3822       // holey array, so inspect the first argument before picking a stub.
3823 __ mov(ecx, Operand(esp, 0));
3824 __ test(ecx, ecx);
3825 __ j(zero, &packed_case, Label::kNear);
3826
3827 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3828 ArraySingleArgumentConstructorStub stub(isolate(),
3829 holey_kind,
3830 override_mode);
3831 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3832 __ jmp(&done, Label::kNear);
3833 __ bind(&packed_case);
3834 }
3835
3836 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3837 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3838 __ bind(&done);
3839 } else {
3840 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3841 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3842 }
3843 }
3844
3845
3846 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3847 ASSERT(ToRegister(instr->context()).is(esi));
3848 CallRuntime(instr->function(), instr->arity(), instr);
3849 }
3850
3851
3852 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3853 Register function = ToRegister(instr->function());
3854 Register code_object = ToRegister(instr->code_object());
3855 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
3856 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
3857 }
3858
3859
3860 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3861 Register result = ToRegister(instr->result());
3862 Register base = ToRegister(instr->base_object());
3863 if (instr->offset()->IsConstantOperand()) {
3864 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3865 __ lea(result, Operand(base, ToInteger32(offset)));
3866 } else {
3867 Register offset = ToRegister(instr->offset());
3868 __ lea(result, Operand(base, offset, times_1, 0));
3869 }
3870 }
3871
3872
3873 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3874 Representation representation = instr->hydrogen()->field_representation();
3875
3876 HObjectAccess access = instr->hydrogen()->access();
3877 int offset = access.offset();
3878
3879 if (access.IsExternalMemory()) {
3880 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3881 MemOperand operand = instr->object()->IsConstantOperand()
3882 ? MemOperand::StaticVariable(
3883 ToExternalReference(LConstantOperand::cast(instr->object())))
3884 : MemOperand(ToRegister(instr->object()), offset);
3885 if (instr->value()->IsConstantOperand()) {
3886 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3887 __ mov(operand, Immediate(ToInteger32(operand_value)));
3888 } else {
3889 Register value = ToRegister(instr->value());
3890 __ Store(value, operand, representation);
3891 }
3892 return;
3893 }
3894
3895 Register object = ToRegister(instr->object());
3896 __ AssertNotSmi(object);
3897 ASSERT(!representation.IsSmi() ||
3898 !instr->value()->IsConstantOperand() ||
3899 IsSmi(LConstantOperand::cast(instr->value())));
3900 if (representation.IsDouble()) {
3901 ASSERT(access.IsInobject());
3902 ASSERT(!instr->hydrogen()->has_transition());
3903 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3904 X87Register value = ToX87Register(instr->value());
3905 X87Mov(FieldOperand(object, offset), value);
3906 return;
3907 }
3908
3909 if (instr->hydrogen()->has_transition()) {
3910 Handle<Map> transition = instr->hydrogen()->transition_map();
3911 AddDeprecationDependency(transition);
3912 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
3913 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3914 Register temp = ToRegister(instr->temp());
3915 Register temp_map = ToRegister(instr->temp_map());
3916 __ mov(temp_map, transition);
3917 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
3918 // Update the write barrier for the map field.
3919 __ RecordWriteForMap(object, transition, temp_map, temp);
3920 }
3921 }
3922
3923 // Do the store.
3924 Register write_register = object;
3925 if (!access.IsInobject()) {
3926 write_register = ToRegister(instr->temp());
3927 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
3928 }
3929
3930 MemOperand operand = FieldOperand(write_register, offset);
3931 if (instr->value()->IsConstantOperand()) {
3932 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3933 if (operand_value->IsRegister()) {
3934 Register value = ToRegister(operand_value);
3935 __ Store(value, operand, representation);
3936 } else if (representation.IsInteger32()) {
3937 Immediate immediate = ToImmediate(operand_value, representation);
3938 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3939 __ mov(operand, immediate);
3940 } else {
3941 Handle<Object> handle_value = ToHandle(operand_value);
3942 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3943 __ mov(operand, handle_value);
3944 }
3945 } else {
3946 Register value = ToRegister(instr->value());
3947 __ Store(value, operand, representation);
3948 }
3949
3950 if (instr->hydrogen()->NeedsWriteBarrier()) {
3951 Register value = ToRegister(instr->value());
3952 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
3953 // Update the write barrier for the object for in-object properties.
3954 __ RecordWriteField(write_register,
3955 offset,
3956 value,
3957 temp,
3958 EMIT_REMEMBERED_SET,
3959 instr->hydrogen()->SmiCheckForWriteBarrier(),
3960 instr->hydrogen()->PointersToHereCheckForValue());
3961 }
3962 }
3963
3964
3965 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3966 ASSERT(ToRegister(instr->context()).is(esi));
3967 ASSERT(ToRegister(instr->object()).is(edx));
3968 ASSERT(ToRegister(instr->value()).is(eax));
3969
3970 __ mov(ecx, instr->name());
3971 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
3972 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3973 }
3974
3975
3976 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3977 Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
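  // With a constant index the operands of the comparison below are swapped,
  // so the condition has to be commuted.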
3978 if (instr->index()->IsConstantOperand()) {
3979 __ cmp(ToOperand(instr->length()),
3980 ToImmediate(LConstantOperand::cast(instr->index()),
3981 instr->hydrogen()->length()->representation()));
3982 cc = CommuteCondition(cc);
3983 } else if (instr->length()->IsConstantOperand()) {
3984 __ cmp(ToOperand(instr->index()),
3985 ToImmediate(LConstantOperand::cast(instr->length()),
3986 instr->hydrogen()->index()->representation()));
3987 } else {
3988 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
3989 }
3990 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3991 Label done;
3992 __ j(NegateCondition(cc), &done, Label::kNear);
3993 __ int3();
3994 __ bind(&done);
3995 } else {
3996 DeoptimizeIf(cc, instr->environment());
3997 }
3998 }
3999
4000
4001 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4002 ElementsKind elements_kind = instr->elements_kind();
4003 LOperand* key = instr->key();
4004 if (!key->IsConstantOperand() &&
4005 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4006 elements_kind)) {
4007 __ SmiUntag(ToRegister(key));
4008 }
4009 Operand operand(BuildFastArrayOperand(
4010 instr->elements(),
4011 key,
4012 instr->hydrogen()->key()->representation(),
4013 elements_kind,
4014 instr->base_offset()));
4015 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4016 elements_kind == FLOAT32_ELEMENTS) {
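    // Duplicate the top of the FPU stack and store the copy as a
    // single-precision float, leaving the original double value in place.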
4017 __ fld(0);
4018 __ fstp_s(operand);
4019 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4020 elements_kind == FLOAT64_ELEMENTS) {
4021 X87Mov(operand, ToX87Register(instr->value()));
4022 } else {
4023 Register value = ToRegister(instr->value());
4024 switch (elements_kind) {
4025 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4026 case EXTERNAL_UINT8_ELEMENTS:
4027 case EXTERNAL_INT8_ELEMENTS:
4028 case UINT8_ELEMENTS:
4029 case INT8_ELEMENTS:
4030 case UINT8_CLAMPED_ELEMENTS:
4031 __ mov_b(operand, value);
4032 break;
4033 case EXTERNAL_INT16_ELEMENTS:
4034 case EXTERNAL_UINT16_ELEMENTS:
4035 case UINT16_ELEMENTS:
4036 case INT16_ELEMENTS:
4037 __ mov_w(operand, value);
4038 break;
4039 case EXTERNAL_INT32_ELEMENTS:
4040 case EXTERNAL_UINT32_ELEMENTS:
4041 case UINT32_ELEMENTS:
4042 case INT32_ELEMENTS:
4043 __ mov(operand, value);
4044 break;
4045 case EXTERNAL_FLOAT32_ELEMENTS:
4046 case EXTERNAL_FLOAT64_ELEMENTS:
4047 case FLOAT32_ELEMENTS:
4048 case FLOAT64_ELEMENTS:
4049 case FAST_SMI_ELEMENTS:
4050 case FAST_ELEMENTS:
4051 case FAST_DOUBLE_ELEMENTS:
4052 case FAST_HOLEY_SMI_ELEMENTS:
4053 case FAST_HOLEY_ELEMENTS:
4054 case FAST_HOLEY_DOUBLE_ELEMENTS:
4055 case DICTIONARY_ELEMENTS:
4056 case SLOPPY_ARGUMENTS_ELEMENTS:
4057 UNREACHABLE();
4058 break;
4059 }
4060 }
4061 }
4062
4063
4064 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4065 ExternalReference canonical_nan_reference =
4066 ExternalReference::address_of_canonical_non_hole_nan();
4067 Operand double_store_operand = BuildFastArrayOperand(
4068 instr->elements(),
4069 instr->key(),
4070 instr->hydrogen()->key()->representation(),
4071 FAST_DOUBLE_ELEMENTS,
4072 instr->base_offset());
4073
4074   if (instr->hydrogen()->IsConstantHoleStore()) {
4075     // We should store the (double) hole. Its bit pattern is a compile-time
4076     // constant, so no floating point registers are required; the two 32-bit
4077     // halves are written below with plain integer moves.
4078 double nan_double = FixedDoubleArray::hole_nan_as_double();
4079 uint64_t int_val = BitCast<uint64_t, double>(nan_double);
4080 int32_t lower = static_cast<int32_t>(int_val);
4081 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4082
4083 __ mov(double_store_operand, Immediate(lower));
4084 Operand double_store_operand2 = BuildFastArrayOperand(
4085 instr->elements(),
4086 instr->key(),
4087 instr->hydrogen()->key()->representation(),
4088 FAST_DOUBLE_ELEMENTS,
4089 instr->base_offset() + kPointerSize);
4090 __ mov(double_store_operand2, Immediate(upper));
4091 } else {
4092 Label no_special_nan_handling;
4093 X87Register value = ToX87Register(instr->value());
4094 X87Fxch(value);
4095
4096 if (instr->NeedsCanonicalization()) {
4097 __ fld(0);
4098 __ fld(0);
4099 __ FCmp();
4100
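      // An ordered self-compare (parity flag clear) means the value is not a
      // NaN and needs no canonicalization.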
4101 __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4102 __ sub(esp, Immediate(kDoubleSize));
4103 __ fst_d(MemOperand(esp, 0));
4104 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4105 Immediate(kHoleNanUpper32));
4106 __ add(esp, Immediate(kDoubleSize));
4107 Label canonicalize;
4108 __ j(not_equal, &canonicalize, Label::kNear);
4109 __ jmp(&no_special_nan_handling, Label::kNear);
4110 __ bind(&canonicalize);
4111 __ fstp(0);
4112 __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4113 }
4114
4115 __ bind(&no_special_nan_handling);
4116 __ fst_d(double_store_operand);
4117 }
4118 }
4119
4120
4121 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4122 Register elements = ToRegister(instr->elements());
4123 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4124
4125 Operand operand = BuildFastArrayOperand(
4126 instr->elements(),
4127 instr->key(),
4128 instr->hydrogen()->key()->representation(),
4129 FAST_ELEMENTS,
4130 instr->base_offset());
4131 if (instr->value()->IsRegister()) {
4132 __ mov(operand, ToRegister(instr->value()));
4133 } else {
4134 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4135 if (IsSmi(operand_value)) {
4136 Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4137 __ mov(operand, immediate);
4138 } else {
4139 ASSERT(!IsInteger32(operand_value));
4140 Handle<Object> handle_value = ToHandle(operand_value);
4141 __ mov(operand, handle_value);
4142 }
4143 }
4144
4145 if (instr->hydrogen()->NeedsWriteBarrier()) {
4146 ASSERT(instr->value()->IsRegister());
4147 Register value = ToRegister(instr->value());
4148 ASSERT(!instr->key()->IsConstantOperand());
4149 SmiCheck check_needed =
4150 instr->hydrogen()->value()->type().IsHeapObject()
4151 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4152 // Compute address of modified element and store it into key register.
4153 __ lea(key, operand);
4154 __ RecordWrite(elements,
4155 key,
4156 value,
4157 EMIT_REMEMBERED_SET,
4158 check_needed,
4159 instr->hydrogen()->PointersToHereCheckForValue());
4160 }
4161 }
4162
4163
4164 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4165   // Dispatch by elements kind: typed/external, fast double, then fast.
4166 if (instr->is_typed_elements()) {
4167 DoStoreKeyedExternalArray(instr);
4168 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4169 DoStoreKeyedFixedDoubleArray(instr);
4170 } else {
4171 DoStoreKeyedFixedArray(instr);
4172 }
4173 }
4174
4175
4176 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4177 ASSERT(ToRegister(instr->context()).is(esi));
4178 ASSERT(ToRegister(instr->object()).is(edx));
4179 ASSERT(ToRegister(instr->key()).is(ecx));
4180 ASSERT(ToRegister(instr->value()).is(eax));
4181
4182 Handle<Code> ic = instr->strict_mode() == STRICT
4183 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4184 : isolate()->builtins()->KeyedStoreIC_Initialize();
4185 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4186 }
4187
4188
4189 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4190 Register object = ToRegister(instr->object());
4191 Register temp = ToRegister(instr->temp());
4192 Label no_memento_found;
4193 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4194 DeoptimizeIf(equal, instr->environment());
4195 __ bind(&no_memento_found);
4196 }
4197
4198
4199 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4200 Register object_reg = ToRegister(instr->object());
4201
4202 Handle<Map> from_map = instr->original_map();
4203 Handle<Map> to_map = instr->transitioned_map();
4204 ElementsKind from_kind = instr->from_kind();
4205 ElementsKind to_kind = instr->to_kind();
4206
4207 Label not_applicable;
4208 bool is_simple_map_transition =
4209 IsSimpleMapChangeTransition(from_kind, to_kind);
4210 Label::Distance branch_distance =
4211 is_simple_map_transition ? Label::kNear : Label::kFar;
4212 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4213   __ j(not_equal, &not_applicable, branch_distance);
4214 if (is_simple_map_transition) {
4215 Register new_map_reg = ToRegister(instr->new_map_temp());
4216 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4217 Immediate(to_map));
4218 // Write barrier.
4219 ASSERT_NE(instr->temp(), NULL);
4220 __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4221 ToRegister(instr->temp()));
4222 } else {
4223 ASSERT(ToRegister(instr->context()).is(esi));
4224 ASSERT(object_reg.is(eax));
4225 PushSafepointRegistersScope scope(this);
4226 __ mov(ebx, to_map);
4227 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4228 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4229 __ CallStub(&stub);
4230 RecordSafepointWithLazyDeopt(instr,
4231 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4232 }
4233   __ bind(&not_applicable);
4234 }
4235
4236
4237 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4238 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4239 public:
4240 DeferredStringCharCodeAt(LCodeGen* codegen,
4241 LStringCharCodeAt* instr,
4242 const X87Stack& x87_stack)
4243 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4244 virtual void Generate() V8_OVERRIDE {
4245 codegen()->DoDeferredStringCharCodeAt(instr_);
4246 }
4247 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4248 private:
4249 LStringCharCodeAt* instr_;
4250 };
4251
4252 DeferredStringCharCodeAt* deferred =
4253 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4254
4255 StringCharLoadGenerator::Generate(masm(),
4256 factory(),
4257 ToRegister(instr->string()),
4258 ToRegister(instr->index()),
4259 ToRegister(instr->result()),
4260 deferred->entry());
4261 __ bind(deferred->exit());
4262 }
4263
4264
4265 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4266 Register string = ToRegister(instr->string());
4267 Register result = ToRegister(instr->result());
4268
4269 // TODO(3095996): Get rid of this. For now, we need to make the
4270 // result register contain a valid pointer because it is already
4271 // contained in the register pointer map.
4272 __ Move(result, Immediate(0));
4273
4274 PushSafepointRegistersScope scope(this);
4275 __ push(string);
4276 // Push the index as a smi. This is safe because of the checks in
4277 // DoStringCharCodeAt above.
4278 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4279 if (instr->index()->IsConstantOperand()) {
4280 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4281 Representation::Smi());
4282 __ push(immediate);
4283 } else {
4284 Register index = ToRegister(instr->index());
4285 __ SmiTag(index);
4286 __ push(index);
4287 }
4288 CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
4289 instr, instr->context());
4290 __ AssertSmi(eax);
4291 __ SmiUntag(eax);
4292 __ StoreToSafepointRegisterSlot(result, eax);
4293 }
4294
4295
4296 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4297 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4298 public:
4299 DeferredStringCharFromCode(LCodeGen* codegen,
4300 LStringCharFromCode* instr,
4301 const X87Stack& x87_stack)
4302 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4303 virtual void Generate() V8_OVERRIDE {
4304 codegen()->DoDeferredStringCharFromCode(instr_);
4305 }
4306 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4307 private:
4308 LStringCharFromCode* instr_;
4309 };
4310
4311 DeferredStringCharFromCode* deferred =
4312 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4313
4314 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4315 Register char_code = ToRegister(instr->char_code());
4316 Register result = ToRegister(instr->result());
4317 ASSERT(!char_code.is(result));
4318
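  // Char codes outside the one-byte range are not in the single character
  // cache; handle them in the deferred runtime call.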
4319 __ cmp(char_code, String::kMaxOneByteCharCode);
4320 __ j(above, deferred->entry());
4321 __ Move(result, Immediate(factory()->single_character_string_cache()));
4322 __ mov(result, FieldOperand(result,
4323 char_code, times_pointer_size,
4324 FixedArray::kHeaderSize));
4325 __ cmp(result, factory()->undefined_value());
4326 __ j(equal, deferred->entry());
4327 __ bind(deferred->exit());
4328 }
4329
4330
4331 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4332 Register char_code = ToRegister(instr->char_code());
4333 Register result = ToRegister(instr->result());
4334
4335 // TODO(3095996): Get rid of this. For now, we need to make the
4336 // result register contain a valid pointer because it is already
4337 // contained in the register pointer map.
4338 __ Move(result, Immediate(0));
4339
4340 PushSafepointRegistersScope scope(this);
4341 __ SmiTag(char_code);
4342 __ push(char_code);
4343 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4344 __ StoreToSafepointRegisterSlot(result, eax);
4345 }
4346
4347
4348 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4349 ASSERT(ToRegister(instr->context()).is(esi));
4350 ASSERT(ToRegister(instr->left()).is(edx));
4351 ASSERT(ToRegister(instr->right()).is(eax));
4352 StringAddStub stub(isolate(),
4353 instr->hydrogen()->flags(),
4354 instr->hydrogen()->pretenure_flag());
4355 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4356 }
4357
4358
4359 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4360 LOperand* input = instr->value();
4361 LOperand* output = instr->result();
4362 ASSERT(input->IsRegister() || input->IsStackSlot());
4363 ASSERT(output->IsDoubleRegister());
4364 if (input->IsRegister()) {
4365 Register input_reg = ToRegister(input);
4366 __ push(input_reg);
4367 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4368 __ pop(input_reg);
4369 } else {
4370 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4371 }
4372 }
4373
4374
4375 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4376 LOperand* input = instr->value();
4377 LOperand* output = instr->result();
4378 X87Register res = ToX87Register(output);
4379 X87PrepareToWrite(res);
4380 __ LoadUint32NoSSE2(ToRegister(input));
4381 X87CommitWrite(res);
4382 }
4383
4384
4385 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4386 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4387 public:
4388 DeferredNumberTagI(LCodeGen* codegen,
4389 LNumberTagI* instr,
4390 const X87Stack& x87_stack)
4391 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4392 virtual void Generate() V8_OVERRIDE {
4393 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4394 SIGNED_INT32);
4395 }
4396 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4397 private:
4398 LNumberTagI* instr_;
4399 };
4400
4401 LOperand* input = instr->value();
4402 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4403 Register reg = ToRegister(input);
4404
4405 DeferredNumberTagI* deferred =
4406 new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
4407 __ SmiTag(reg);
4408 __ j(overflow, deferred->entry());
4409 __ bind(deferred->exit());
4410 }
4411
4412
4413 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4414 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4415 public:
4416 DeferredNumberTagU(LCodeGen* codegen,
4417 LNumberTagU* instr,
4418 const X87Stack& x87_stack)
4419 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4420 virtual void Generate() V8_OVERRIDE {
4421 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4422 UNSIGNED_INT32);
4423 }
4424 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4425 private:
4426 LNumberTagU* instr_;
4427 };
4428
4429 LOperand* input = instr->value();
4430 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4431 Register reg = ToRegister(input);
4432
4433 DeferredNumberTagU* deferred =
4434 new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
4435 __ cmp(reg, Immediate(Smi::kMaxValue));
4436 __ j(above, deferred->entry());
4437 __ SmiTag(reg);
4438 __ bind(deferred->exit());
4439 }
4440
4441
4442 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4443 LOperand* value,
4444 LOperand* temp,
4445 IntegerSignedness signedness) {
4446 Label done, slow;
4447 Register reg = ToRegister(value);
4448 Register tmp = ToRegister(temp);
4449
4450 if (signedness == SIGNED_INT32) {
4451 // There was overflow, so bits 30 and 31 of the original integer
4452 // disagree. Try to allocate a heap number in new space and store
4453 // the value in there. If that fails, call the runtime system.
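    // Undoing the smi shift and flipping the sign bit recovers the original
    // untagged value.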
4454 __ SmiUntag(reg);
4455 __ xor_(reg, 0x80000000);
4456 __ push(reg);
4457 __ fild_s(Operand(esp, 0));
4458 __ pop(reg);
4459 } else {
4460 // There's no fild variant for unsigned values, so zero-extend to a 64-bit
4461 // int manually.
4462 __ push(Immediate(0));
4463 __ push(reg);
4464 __ fild_d(Operand(esp, 0));
4465 __ pop(reg);
4466 __ pop(reg);
4467 }
4468
4469 if (FLAG_inline_new) {
4470 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4471 __ jmp(&done, Label::kNear);
4472 }
4473
4474 // Slow case: Call the runtime system to do the number allocation.
4475 __ bind(&slow);
4476 {
4477 // TODO(3095996): Put a valid pointer value in the stack slot where the
4478 // result register is stored, as this register is in the pointer map, but
4479 // contains an integer value.
4480 __ Move(reg, Immediate(0));
4481
4482 // Preserve the value of all registers.
4483 PushSafepointRegistersScope scope(this);
4484
4485 // NumberTagI and NumberTagD use the context from the frame, rather than
4486 // the environment's HContext or HInlinedContext value.
4487 // They only call Runtime::kHiddenAllocateHeapNumber.
4488 // The corresponding HChange instructions are added in a phase that does
4489 // not have easy access to the local context.
4490 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4491 __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
4492 RecordSafepointWithRegisters(
4493 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4494 __ StoreToSafepointRegisterSlot(reg, eax);
4495 }
4496
4497 __ bind(&done);
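  // The converted value stayed on the x87 stack across the allocation; pop it
  // into the new heap number's value field.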
4498 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4499 }
4500
4501
4502 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4503 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4504 public:
4505 DeferredNumberTagD(LCodeGen* codegen,
4506 LNumberTagD* instr,
4507 const X87Stack& x87_stack)
4508 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4509 virtual void Generate() V8_OVERRIDE {
4510 codegen()->DoDeferredNumberTagD(instr_);
4511 }
4512 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4513 private:
4514 LNumberTagD* instr_;
4515 };
4516
4517 Register reg = ToRegister(instr->result());
4518
4519   // Put the value on top of the x87 stack.
4520 X87Register src = ToX87Register(instr->value());
4521 X87LoadForUsage(src);
4522
4523 DeferredNumberTagD* deferred =
4524 new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
4525 if (FLAG_inline_new) {
4526 Register tmp = ToRegister(instr->temp());
4527 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4528 } else {
4529 __ jmp(deferred->entry());
4530 }
4531 __ bind(deferred->exit());
4532 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4533 }
4534
4535
4536 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4537 // TODO(3095996): Get rid of this. For now, we need to make the
4538 // result register contain a valid pointer because it is already
4539 // contained in the register pointer map.
4540 Register reg = ToRegister(instr->result());
4541 __ Move(reg, Immediate(0));
4542
4543 PushSafepointRegistersScope scope(this);
4544 // NumberTagI and NumberTagD use the context from the frame, rather than
4545 // the environment's HContext or HInlinedContext value.
4546 // They only call Runtime::kHiddenAllocateHeapNumber.
4547 // The corresponding HChange instructions are added in a phase that does
4548 // not have easy access to the local context.
4549 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4550 __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
4551 RecordSafepointWithRegisters(
4552 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4553 __ StoreToSafepointRegisterSlot(reg, eax);
4554 }
4555
4556
4557 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4558 HChange* hchange = instr->hydrogen();
4559 Register input = ToRegister(instr->value());
4560 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4561 hchange->value()->CheckFlag(HValue::kUint32)) {
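    // A uint32 value with bit 30 or 31 set does not fit in a smi; deopt
    // before tagging.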
4562 __ test(input, Immediate(0xc0000000));
4563 DeoptimizeIf(not_zero, instr->environment());
4564 }
4565 __ SmiTag(input);
4566 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4567 !hchange->value()->CheckFlag(HValue::kUint32)) {
4568 DeoptimizeIf(overflow, instr->environment());
4569 }
4570 }
4571
4572
4573 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4574 LOperand* input = instr->value();
4575 Register result = ToRegister(input);
4576 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4577 if (instr->needs_check()) {
4578 __ test(result, Immediate(kSmiTagMask));
4579 DeoptimizeIf(not_zero, instr->environment());
4580 } else {
4581 __ AssertSmi(result);
4582 }
4583 __ SmiUntag(result);
4584 }
4585
4586
4587 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
4588 Register temp_reg,
4589 X87Register res_reg,
4590 bool can_convert_undefined_to_nan,
4591 bool deoptimize_on_minus_zero,
4592 LEnvironment* env,
4593 NumberUntagDMode mode) {
4594 Label load_smi, done;
4595
4596 X87PrepareToWrite(res_reg);
4597 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4598 // Smi check.
4599 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4600
4601 // Heap number map check.
4602 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4603 factory()->heap_number_map());
4604 if (!can_convert_undefined_to_nan) {
4605 DeoptimizeIf(not_equal, env);
4606 } else {
4607 Label heap_number, convert;
4608 __ j(equal, &heap_number, Label::kNear);
4609
4610       // Convert undefined to NaN.
4611 __ cmp(input_reg, factory()->undefined_value());
4612 DeoptimizeIf(not_equal, env);
4613
4614 __ bind(&convert);
4615 ExternalReference nan =
4616 ExternalReference::address_of_canonical_non_hole_nan();
4617 __ fld_d(Operand::StaticVariable(nan));
4618 __ jmp(&done, Label::kNear);
4619
4620 __ bind(&heap_number);
4621 }
4622 // Heap number to x87 conversion.
4623 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4624 if (deoptimize_on_minus_zero) {
4625 __ fldz();
4626 __ FCmp();
4627 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4628 __ j(not_zero, &done, Label::kNear);
4629
4630 // Use general purpose registers to check if we have -0.0
4631 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4632 __ test(temp_reg, Immediate(HeapNumber::kSignMask));
4633 __ j(zero, &done, Label::kNear);
4634
4635 // Pop FPU stack before deoptimizing.
4636 __ fstp(0);
4637 DeoptimizeIf(not_zero, env);
4638 }
4639 __ jmp(&done, Label::kNear);
4640 } else {
4641 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4642 }
4643
4644 __ bind(&load_smi);
4645 // Clobbering a temp is faster than re-tagging the
4646 // input register since we avoid dependencies.
4647 __ mov(temp_reg, input_reg);
4648 __ SmiUntag(temp_reg); // Untag smi before converting to float.
4649 __ push(temp_reg);
4650 __ fild_s(Operand(esp, 0));
4651 __ add(esp, Immediate(kPointerSize));
4652 __ bind(&done);
4653 X87CommitWrite(res_reg);
4654 }
4655
4656
4657 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4658 Register input_reg = ToRegister(instr->value());
4659
4660 // The input was optimistically untagged; revert it.
4661 STATIC_ASSERT(kSmiTagSize == 1);
4662 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
4663
4664 if (instr->truncating()) {
4665 Label no_heap_number, check_bools, check_false;
4666
4667 // Heap number map check.
4668 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4669 factory()->heap_number_map());
4670 __ j(not_equal, &no_heap_number, Label::kNear);
4671 __ TruncateHeapNumberToI(input_reg, input_reg);
4672 __ jmp(done);
4673
4674 __ bind(&no_heap_number);
4675 // Check for Oddballs. Undefined/False is converted to zero and True to one
4676 // for truncating conversions.
4677 __ cmp(input_reg, factory()->undefined_value());
4678 __ j(not_equal, &check_bools, Label::kNear);
4679 __ Move(input_reg, Immediate(0));
4680 __ jmp(done);
4681
4682 __ bind(&check_bools);
4683 __ cmp(input_reg, factory()->true_value());
4684 __ j(not_equal, &check_false, Label::kNear);
4685 __ Move(input_reg, Immediate(1));
4686 __ jmp(done);
4687
4688 __ bind(&check_false);
4689 __ cmp(input_reg, factory()->false_value());
4690 __ RecordComment("Deferred TaggedToI: cannot truncate");
4691 DeoptimizeIf(not_equal, instr->environment());
4692 __ Move(input_reg, Immediate(0));
4693 } else {
4694 Label bailout;
4695 __ TaggedToI(input_reg, input_reg,
4696 instr->hydrogen()->GetMinusZeroMode(), &bailout);
4697 __ jmp(done);
4698 __ bind(&bailout);
4699 DeoptimizeIf(no_condition, instr->environment());
4700 }
4701 }
4702
4703
4704 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4705 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4706 public:
4707 DeferredTaggedToI(LCodeGen* codegen,
4708 LTaggedToI* instr,
4709 const X87Stack& x87_stack)
4710 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4711 virtual void Generate() V8_OVERRIDE {
4712 codegen()->DoDeferredTaggedToI(instr_, done());
4713 }
4714 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4715 private:
4716 LTaggedToI* instr_;
4717 };
4718
4719 LOperand* input = instr->value();
4720 ASSERT(input->IsRegister());
4721 Register input_reg = ToRegister(input);
4722 ASSERT(input_reg.is(ToRegister(instr->result())));
4723
4724 if (instr->hydrogen()->value()->representation().IsSmi()) {
4725 __ SmiUntag(input_reg);
4726 } else {
4727 DeferredTaggedToI* deferred =
4728 new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
4729 // Optimistically untag the input.
4730 // If the input is a HeapObject, SmiUntag will set the carry flag.
4731 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
4732 __ SmiUntag(input_reg);
4733 // Branch to deferred code if the input was tagged.
4734 // The deferred code will take care of restoring the tag.
4735 __ j(carry, deferred->entry());
4736 __ bind(deferred->exit());
4737 }
4738 }
4739
4740
4741 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4742 LOperand* input = instr->value();
4743 ASSERT(input->IsRegister());
4744 LOperand* temp = instr->temp();
4745 ASSERT(temp->IsRegister());
4746 LOperand* result = instr->result();
4747 ASSERT(result->IsDoubleRegister());
4748
4749 Register input_reg = ToRegister(input);
4750 bool deoptimize_on_minus_zero =
4751 instr->hydrogen()->deoptimize_on_minus_zero();
4752 Register temp_reg = ToRegister(temp);
4753
4754 HValue* value = instr->hydrogen()->value();
4755 NumberUntagDMode mode = value->representation().IsSmi()
4756 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4757
4758 EmitNumberUntagDNoSSE2(input_reg,
4759 temp_reg,
4760 ToX87Register(result),
4761 instr->hydrogen()->can_convert_undefined_to_nan(),
4762 deoptimize_on_minus_zero,
4763 instr->environment(),
4764 mode);
4765 }
4766
4767
4768 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4769 LOperand* input = instr->value();
4770 ASSERT(input->IsDoubleRegister());
4771 LOperand* result = instr->result();
4772 ASSERT(result->IsRegister());
4773 Register result_reg = ToRegister(result);
4774
4775 if (instr->truncating()) {
4776 X87Register input_reg = ToX87Register(input);
4777 X87Fxch(input_reg);
4778 __ TruncateX87TOSToI(result_reg);
4779 } else {
4780 Label bailout, done;
4781 X87Register input_reg = ToX87Register(input);
4782 X87Fxch(input_reg);
4783 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
4784 &bailout, Label::kNear);
4785 __ jmp(&done, Label::kNear);
4786 __ bind(&bailout);
4787 DeoptimizeIf(no_condition, instr->environment());
4788 __ bind(&done);
4789 }
4790 }
4791
4792
4793 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4794 LOperand* input = instr->value();
4795 ASSERT(input->IsDoubleRegister());
4796 LOperand* result = instr->result();
4797 ASSERT(result->IsRegister());
4798 Register result_reg = ToRegister(result);
4799
4800 Label bailout, done;
4801 X87Register input_reg = ToX87Register(input);
4802 X87Fxch(input_reg);
4803 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
4804 &bailout, Label::kNear);
4805 __ jmp(&done, Label::kNear);
4806 __ bind(&bailout);
4807 DeoptimizeIf(no_condition, instr->environment());
4808 __ bind(&done);
4809
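  // SmiTag shifts the value up by one bit; the overflow deopt below catches
  // integers that do not fit in the 31-bit Smi payload.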
4810 __ SmiTag(result_reg);
4811 DeoptimizeIf(overflow, instr->environment());
4812 }
4813
4814
4815 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4816 LOperand* input = instr->value();
4817 __ test(ToOperand(input), Immediate(kSmiTagMask));
4818 DeoptimizeIf(not_zero, instr->environment());
4819 }
4820
4821
4822 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4823 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4824 LOperand* input = instr->value();
4825 __ test(ToOperand(input), Immediate(kSmiTagMask));
4826 DeoptimizeIf(zero, instr->environment());
4827 }
4828 }
4829
4830
4831 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4832 Register input = ToRegister(instr->value());
4833 Register temp = ToRegister(instr->temp());
4834
4835 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
4836
4837 if (instr->hydrogen()->is_interval_check()) {
4838 InstanceType first;
4839 InstanceType last;
4840 instr->hydrogen()->GetCheckInterval(&first, &last);
4841
4842 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4843 static_cast<int8_t>(first));
4844
4845     // If there is only one type in the interval, check for equality.
4846 if (first == last) {
4847 DeoptimizeIf(not_equal, instr->environment());
4848 } else {
4849 DeoptimizeIf(below, instr->environment());
4850 // Omit check for the last type.
4851 if (last != LAST_TYPE) {
4852 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4853 static_cast<int8_t>(last));
4854 DeoptimizeIf(above, instr->environment());
4855 }
4856 }
4857 } else {
4858 uint8_t mask;
4859 uint8_t tag;
4860 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4861
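    // Two strategies: if the mask is a single bit, a test_b against that bit
    // suffices (the tag is then either 0 or the mask bit itself); otherwise
    // mask the instance type explicitly and compare it with the tag.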
4862 if (IsPowerOf2(mask)) {
4863 ASSERT(tag == 0 || IsPowerOf2(tag));
4864 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
4865 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4866 } else {
4867 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
4868 __ and_(temp, mask);
4869 __ cmp(temp, tag);
4870 DeoptimizeIf(not_equal, instr->environment());
4871 }
4872 }
4873 }
4874
4875
4876 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4877 Handle<HeapObject> object = instr->hydrogen()->object().handle();
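  // The object may still live in new space, where the GC can move it; in that
  // case compare via a Cell (which the GC keeps up to date) instead of
  // embedding the object in the code directly.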
4878 if (instr->hydrogen()->object_in_new_space()) {
4879 Register reg = ToRegister(instr->value());
4880 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4881 __ cmp(reg, Operand::ForCell(cell));
4882 } else {
4883 Operand operand = ToOperand(instr->value());
4884 __ cmp(operand, object);
4885 }
4886 DeoptimizeIf(not_equal, instr->environment());
4887 }
4888
4889
4890 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4891 {
4892 PushSafepointRegistersScope scope(this);
4893 __ push(object);
4894 __ xor_(esi, esi);
4895 __ CallRuntime(Runtime::kTryMigrateInstance);
4896 RecordSafepointWithRegisters(
4897 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4898
4899 __ test(eax, Immediate(kSmiTagMask));
4900 }
4901 DeoptimizeIf(zero, instr->environment());
4902 }
4903
4904
4905 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4906 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
4907 public:
4908 DeferredCheckMaps(LCodeGen* codegen,
4909 LCheckMaps* instr,
4910 Register object,
4911 const X87Stack& x87_stack)
4912 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
4913 SetExit(check_maps());
4914 }
4915 virtual void Generate() V8_OVERRIDE {
4916 codegen()->DoDeferredInstanceMigration(instr_, object_);
4917 }
4918 Label* check_maps() { return &check_maps_; }
4919 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4920 private:
4921 LCheckMaps* instr_;
4922 Label check_maps_;
4923 Register object_;
4924 };
4925
4926 if (instr->hydrogen()->IsStabilityCheck()) {
4927 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4928 for (int i = 0; i < maps->size(); ++i) {
4929 AddStabilityDependency(maps->at(i).handle());
4930 }
4931 return;
4932 }
4933
4934 LOperand* input = instr->value();
4935 ASSERT(input->IsRegister());
4936 Register reg = ToRegister(input);
4937
4938 DeferredCheckMaps* deferred = NULL;
4939 if (instr->hydrogen()->HasMigrationTarget()) {
4940 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
4941 __ bind(deferred->check_maps());
4942 }
4943
4944 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4945 Label success;
4946 for (int i = 0; i < maps->size() - 1; i++) {
4947 Handle<Map> map = maps->at(i).handle();
4948 __ CompareMap(reg, map);
4949 __ j(equal, &success, Label::kNear);
4950 }
4951
4952 Handle<Map> map = maps->at(maps->size() - 1).handle();
4953 __ CompareMap(reg, map);
4954 if (instr->hydrogen()->HasMigrationTarget()) {
4955 __ j(not_equal, deferred->entry());
4956 } else {
4957 DeoptimizeIf(not_equal, instr->environment());
4958 }
4959
4960 __ bind(&success);
4961 }
4962
4963
4964 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4965 UNREACHABLE();
4966 }
4967
4968
4969 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4970 ASSERT(instr->unclamped()->Equals(instr->result()));
4971 Register value_reg = ToRegister(instr->result());
4972 __ ClampUint8(value_reg);
4973 }
4974
4975
4976 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
4977 Register input_reg = ToRegister(instr->unclamped());
4978 Register result_reg = ToRegister(instr->result());
4979 Register scratch = ToRegister(instr->scratch());
4980 Register scratch2 = ToRegister(instr->scratch2());
4981 Register scratch3 = ToRegister(instr->scratch3());
4982 Label is_smi, done, heap_number, valid_exponent,
4983 largest_value, zero_result, maybe_nan_or_infinity;
4984
4985 __ JumpIfSmi(input_reg, &is_smi);
4986
4987 // Check for heap number
4988 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4989 factory()->heap_number_map());
4990 __ j(equal, &heap_number, Label::kNear);
4991
4992 // Check for undefined. Undefined is converted to zero for clamping
4993 // conversions.
4994 __ cmp(input_reg, factory()->undefined_value());
4995 DeoptimizeIf(not_equal, instr->environment());
4996 __ jmp(&zero_result, Label::kNear);
4997
4998 // Heap number
4999 __ bind(&heap_number);
5000
5001   // Surprisingly, all of the hand-crafted bit manipulation below is much
5002   // faster than using the x86 FPU's built-in rounding instruction, especially
5003   // since "banker's rounding" would add considerable extra cost.
5004
5005 // Get exponent word.
5006 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5007 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5008
5009 // Test for negative values --> clamp to zero
5010 __ test(scratch, scratch);
5011 __ j(negative, &zero_result, Label::kNear);
5012
5013 // Get exponent alone in scratch2.
5014 __ mov(scratch2, scratch);
5015 __ and_(scratch2, HeapNumber::kExponentMask);
5016 __ shr(scratch2, HeapNumber::kExponentShift);
5017 __ j(zero, &zero_result, Label::kNear);
5018 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5019 __ j(negative, &zero_result, Label::kNear);
5020
5021 const uint32_t non_int8_exponent = 7;
5022 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5023 // If the exponent is too big, check for special values.
5024 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5025
5026 __ bind(&valid_exponent);
5027   // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5028   // < 7. The shift bias is the number of bits to shift the mantissa so that,
5029   // with an exponent of 7, the top-most one ends up in bit 30, allowing
5030   // detection of the rounding overflow from 255.5 to 256 (bit 31 goes from 0
5031   // to 1).
5032 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
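  // (For instance, with HeapNumber::kExponentShift == 20 the bias is 2, so the
  // shift count below is exponent + 3: the implicit one OR'ed in at bit 20
  // reaches bit 30 exactly when the exponent is 7, as described above.)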
5033 __ lea(result_reg, MemOperand(scratch2, shift_bias));
5034 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5035 // top bits of the mantissa.
5036 __ and_(scratch, HeapNumber::kMantissaMask);
5037 // Put back the implicit 1 of the mantissa
5038 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5039 // Shift up to round
5040 __ shl_cl(scratch);
5041   // Use "banker's rounding" as specified: if the fractional part of the number
5042   // is exactly 0.5, add the bit in the "ones" place to the "halves" place,
5043   // which has the effect of rounding to even.
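  // (E.g. 2.5 rounds down to 2 and 3.5 rounds up to 4, while anything strictly
  // above the halfway point, such as 2.501, rounds up.)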
5044 __ mov(scratch2, scratch);
5045 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5046 const uint32_t one_bit_shift = one_half_bit_shift + 1;
5047 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5048 __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5049 Label no_round;
5050 __ j(less, &no_round, Label::kNear);
5051 Label round_up;
5052 __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5053 __ j(greater, &round_up, Label::kNear);
5054 __ test(scratch3, scratch3);
5055 __ j(not_zero, &round_up, Label::kNear);
5056 __ mov(scratch2, scratch);
5057 __ and_(scratch2, Immediate(1 << one_bit_shift));
5058 __ shr(scratch2, 1);
5059 __ bind(&round_up);
5060 __ add(scratch, scratch2);
5061 __ j(overflow, &largest_value, Label::kNear);
5062 __ bind(&no_round);
5063 __ shr(scratch, 23);
5064 __ mov(result_reg, scratch);
5065 __ jmp(&done, Label::kNear);
5066
5067 __ bind(&maybe_nan_or_infinity);
5068   // Check for NaN/Infinity; all other values map to 255.
5069 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5070 __ j(not_equal, &largest_value, Label::kNear);
5071
5072 // Check for NaN, which differs from Infinity in that at least one mantissa
5073 // bit is set.
5074 __ and_(scratch, HeapNumber::kMantissaMask);
5075 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5076 __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
5077 // Infinity -> Fall through to map to 255.
5078
5079 __ bind(&largest_value);
5080 __ mov(result_reg, Immediate(255));
5081 __ jmp(&done, Label::kNear);
5082
5083 __ bind(&zero_result);
5084 __ xor_(result_reg, result_reg);
5085 __ jmp(&done, Label::kNear);
5086
5087 // smi
5088 __ bind(&is_smi);
5089 if (!input_reg.is(result_reg)) {
5090 __ mov(result_reg, input_reg);
5091 }
5092 __ SmiUntag(result_reg);
5093 __ ClampUint8(result_reg);
5094 __ bind(&done);
5095 }
5096
5097
5098 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5099 UNREACHABLE();
5100 }
5101
5102
5103 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5104 UNREACHABLE();
5105 }
5106
5107
5108 void LCodeGen::DoAllocate(LAllocate* instr) {
5109 class DeferredAllocate V8_FINAL : public LDeferredCode {
5110 public:
5111 DeferredAllocate(LCodeGen* codegen,
5112 LAllocate* instr,
5113 const X87Stack& x87_stack)
5114 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5115 virtual void Generate() V8_OVERRIDE {
5116 codegen()->DoDeferredAllocate(instr_);
5117 }
5118 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5119 private:
5120 LAllocate* instr_;
5121 };
5122
5123 DeferredAllocate* deferred =
5124 new(zone()) DeferredAllocate(this, instr, x87_stack_);
5125
5126 Register result = ToRegister(instr->result());
5127 Register temp = ToRegister(instr->temp());
5128
5129 // Allocate memory for the object.
5130 AllocationFlags flags = TAG_OBJECT;
5131 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5132 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5133 }
5134 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5135 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5136 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5137 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5138 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5139 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5140 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5141 }
5142
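  // Constant sizes above the regular heap-object limit cannot be allocated
  // inline, so that case jumps straight to the deferred (runtime) path.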
5143 if (instr->size()->IsConstantOperand()) {
5144 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5145 if (size <= Page::kMaxRegularHeapObjectSize) {
5146 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5147 } else {
5148 __ jmp(deferred->entry());
5149 }
5150 } else {
5151 Register size = ToRegister(instr->size());
5152 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5153 }
5154
5155 __ bind(deferred->exit());
5156
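  // Prefill the new object with one-pointer filler maps so the heap stays
  // iterable while the object's real contents have not yet been written.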
5157 if (instr->hydrogen()->MustPrefillWithFiller()) {
5158 if (instr->size()->IsConstantOperand()) {
5159 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5160 __ mov(temp, (size / kPointerSize) - 1);
5161 } else {
5162 temp = ToRegister(instr->size());
5163 __ shr(temp, kPointerSizeLog2);
5164 __ dec(temp);
5165 }
5166 Label loop;
5167 __ bind(&loop);
5168 __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5169 isolate()->factory()->one_pointer_filler_map());
5170 __ dec(temp);
5171 __ j(not_zero, &loop);
5172 }
5173 }
5174
5175
5176 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5177 Register result = ToRegister(instr->result());
5178
5179 // TODO(3095996): Get rid of this. For now, we need to make the
5180 // result register contain a valid pointer because it is already
5181 // contained in the register pointer map.
5182 __ Move(result, Immediate(Smi::FromInt(0)));
5183
5184 PushSafepointRegistersScope scope(this);
5185 if (instr->size()->IsRegister()) {
5186 Register size = ToRegister(instr->size());
5187 ASSERT(!size.is(result));
5188 __ SmiTag(ToRegister(instr->size()));
5189 __ push(size);
5190 } else {
5191 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5192 if (size >= 0 && size <= Smi::kMaxValue) {
5193 __ push(Immediate(Smi::FromInt(size)));
5194 } else {
5195 // We should never get here at runtime => abort
5196 __ int3();
5197 return;
5198 }
5199 }
5200
5201 int flags = AllocateDoubleAlignFlag::encode(
5202 instr->hydrogen()->MustAllocateDoubleAligned());
5203 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5204 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5205 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5206 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5207 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5208 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5209 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5210 } else {
5211 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5212 }
5213 __ push(Immediate(Smi::FromInt(flags)));
5214
5215 CallRuntimeFromDeferred(
5216 Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5217 __ StoreToSafepointRegisterSlot(result, eax);
5218 }
5219
5220
5221 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5222 ASSERT(ToRegister(instr->value()).is(eax));
5223 __ push(eax);
5224 CallRuntime(Runtime::kToFastProperties, 1, instr);
5225 }
5226
5227
5228 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5229 ASSERT(ToRegister(instr->context()).is(esi));
5230 Label materialized;
5231 // Registers will be used as follows:
5232 // ecx = literals array.
5233 // ebx = regexp literal.
5234 // eax = regexp literal clone.
5235 // esi = context.
5236 int literal_offset =
5237 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5238 __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5239 __ mov(ebx, FieldOperand(ecx, literal_offset));
5240 __ cmp(ebx, factory()->undefined_value());
5241 __ j(not_equal, &materialized, Label::kNear);
5242
5243 // Create regexp literal using runtime function
5244 // Result will be in eax.
5245 __ push(ecx);
5246 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5247 __ push(Immediate(instr->hydrogen()->pattern()));
5248 __ push(Immediate(instr->hydrogen()->flags()));
5249 CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5250 __ mov(ebx, eax);
5251
5252 __ bind(&materialized);
5253 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5254 Label allocated, runtime_allocate;
5255 __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5256 __ jmp(&allocated, Label::kNear);
5257
5258 __ bind(&runtime_allocate);
5259 __ push(ebx);
5260 __ push(Immediate(Smi::FromInt(size)));
5261 CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5262 __ pop(ebx);
5263
5264 __ bind(&allocated);
5265 // Copy the content into the newly allocated memory.
5266 // (Unroll copy loop once for better throughput).
5267 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5268 __ mov(edx, FieldOperand(ebx, i));
5269 __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
5270 __ mov(FieldOperand(eax, i), edx);
5271 __ mov(FieldOperand(eax, i + kPointerSize), ecx);
5272 }
5273 if ((size % (2 * kPointerSize)) != 0) {
5274 __ mov(edx, FieldOperand(ebx, size - kPointerSize));
5275 __ mov(FieldOperand(eax, size - kPointerSize), edx);
5276 }
5277 }
5278
5279
5280 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5281 ASSERT(ToRegister(instr->context()).is(esi));
5282   // Use the fast-case closure allocation code that allocates in new
5283   // space for nested functions that don't need their literals cloned.
5284 bool pretenure = instr->hydrogen()->pretenure();
5285 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5286 FastNewClosureStub stub(isolate(),
5287 instr->hydrogen()->strict_mode(),
5288 instr->hydrogen()->is_generator());
5289 __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
5290 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5291 } else {
5292 __ push(esi);
5293 __ push(Immediate(instr->hydrogen()->shared_info()));
5294 __ push(Immediate(pretenure ? factory()->true_value()
5295 : factory()->false_value()));
5296 CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
5297 }
5298 }
5299
5300
5301 void LCodeGen::DoTypeof(LTypeof* instr) {
5302 ASSERT(ToRegister(instr->context()).is(esi));
5303 LOperand* input = instr->value();
5304 EmitPushTaggedOperand(input);
5305 CallRuntime(Runtime::kTypeof, 1, instr);
5306 }
5307
5308
5309 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5310 Register input = ToRegister(instr->value());
5311 Condition final_branch_condition = EmitTypeofIs(instr, input);
5312 if (final_branch_condition != no_condition) {
5313 EmitBranch(instr, final_branch_condition);
5314 }
5315 }
5316
5317
5318 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5319 Label* true_label = instr->TrueLabel(chunk_);
5320 Label* false_label = instr->FalseLabel(chunk_);
5321 Handle<String> type_name = instr->type_literal();
5322 int left_block = instr->TrueDestination(chunk_);
5323 int right_block = instr->FalseDestination(chunk_);
5324 int next_block = GetNextEmittedBlock();
5325
5326 Label::Distance true_distance = left_block == next_block ? Label::kNear
5327 : Label::kFar;
5328 Label::Distance false_distance = right_block == next_block ? Label::kNear
5329 : Label::kFar;
5330 Condition final_branch_condition = no_condition;
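  // Each branch below emits the check for one possible 'typeof' result string
  // (e.g. typeof 42 is "number" and typeof undefined is "undefined"); unknown
  // type literals fall through to the unconditional jump to the false label.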
5331 if (String::Equals(type_name, factory()->number_string())) {
5332 __ JumpIfSmi(input, true_label, true_distance);
5333 __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5334 factory()->heap_number_map());
5335 final_branch_condition = equal;
5336
5337 } else if (String::Equals(type_name, factory()->string_string())) {
5338 __ JumpIfSmi(input, false_label, false_distance);
5339 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5340 __ j(above_equal, false_label, false_distance);
5341 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5342 1 << Map::kIsUndetectable);
5343 final_branch_condition = zero;
5344
5345 } else if (String::Equals(type_name, factory()->symbol_string())) {
5346 __ JumpIfSmi(input, false_label, false_distance);
5347 __ CmpObjectType(input, SYMBOL_TYPE, input);
5348 final_branch_condition = equal;
5349
5350 } else if (String::Equals(type_name, factory()->boolean_string())) {
5351 __ cmp(input, factory()->true_value());
5352 __ j(equal, true_label, true_distance);
5353 __ cmp(input, factory()->false_value());
5354 final_branch_condition = equal;
5355
5356 } else if (FLAG_harmony_typeof &&
5357 String::Equals(type_name, factory()->null_string())) {
5358 __ cmp(input, factory()->null_value());
5359 final_branch_condition = equal;
5360
5361 } else if (String::Equals(type_name, factory()->undefined_string())) {
5362 __ cmp(input, factory()->undefined_value());
5363 __ j(equal, true_label, true_distance);
5364 __ JumpIfSmi(input, false_label, false_distance);
5365 // Check for undetectable objects => true.
5366 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5367 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5368 1 << Map::kIsUndetectable);
5369 final_branch_condition = not_zero;
5370
5371 } else if (String::Equals(type_name, factory()->function_string())) {
5372 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5373 __ JumpIfSmi(input, false_label, false_distance);
5374 __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5375 __ j(equal, true_label, true_distance);
5376 __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5377 final_branch_condition = equal;
5378
5379 } else if (String::Equals(type_name, factory()->object_string())) {
5380 __ JumpIfSmi(input, false_label, false_distance);
5381 if (!FLAG_harmony_typeof) {
5382 __ cmp(input, factory()->null_value());
5383 __ j(equal, true_label, true_distance);
5384 }
5385 __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5386 __ j(below, false_label, false_distance);
5387 __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5388 __ j(above, false_label, false_distance);
5389 // Check for undetectable objects => false.
5390 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5391 1 << Map::kIsUndetectable);
5392 final_branch_condition = zero;
5393
5394 } else {
5395 __ jmp(false_label, false_distance);
5396 }
5397 return final_branch_condition;
5398 }
5399
5400
5401 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5402 Register temp = ToRegister(instr->temp());
5403
5404 EmitIsConstructCall(temp);
5405 EmitBranch(instr, equal);
5406 }
5407
5408
5409 void LCodeGen::EmitIsConstructCall(Register temp) {
5410 // Get the frame pointer for the calling frame.
5411 __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
5412
5413 // Skip the arguments adaptor frame if it exists.
5414 Label check_frame_marker;
5415 __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5416 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5417 __ j(not_equal, &check_frame_marker, Label::kNear);
5418 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5419
5420 // Check the marker in the calling frame.
5421 __ bind(&check_frame_marker);
5422 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5423 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5424 }
5425
5426
5427 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5428 if (!info()->IsStub()) {
5429 // Ensure that we have enough space after the previous lazy-bailout
5430 // instruction for patching the code here.
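    // (For example, if the previous lazy deopt point ended at offset 100,
    // space_needed is 5 and the current offset is 102, three bytes of nops
    // are emitted.)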
5431 int current_pc = masm()->pc_offset();
5432 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5433 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5434 __ Nop(padding_size);
5435 }
5436 }
5437 last_lazy_deopt_pc_ = masm()->pc_offset();
5438 }
5439
5440
5441 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5442 last_lazy_deopt_pc_ = masm()->pc_offset();
5443 ASSERT(instr->HasEnvironment());
5444 LEnvironment* env = instr->environment();
5445 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5446 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5447 }
5448
5449
5450 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5451 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5452 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5453 // needed return address), even though the implementation of LAZY and EAGER is
5454 // now identical. When LAZY is eventually completely folded into EAGER, remove
5455 // the special case below.
5456 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5457 type = Deoptimizer::LAZY;
5458 }
5459 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5460 DeoptimizeIf(no_condition, instr->environment(), type);
5461 }
5462
5463
5464 void LCodeGen::DoDummy(LDummy* instr) {
5465 // Nothing to see here, move on!
5466 }
5467
5468
5469 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5470 // Nothing to see here, move on!
5471 }
5472
5473
5474 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5475 PushSafepointRegistersScope scope(this);
5476 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5477 __ CallRuntime(Runtime::kHiddenStackGuard);
5478 RecordSafepointWithLazyDeopt(
5479 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5480 ASSERT(instr->HasEnvironment());
5481 LEnvironment* env = instr->environment();
5482 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5483 }
5484
5485
5486 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5487 class DeferredStackCheck V8_FINAL : public LDeferredCode {
5488 public:
5489 DeferredStackCheck(LCodeGen* codegen,
5490 LStackCheck* instr,
5491 const X87Stack& x87_stack)
5492 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5493 virtual void Generate() V8_OVERRIDE {
5494 codegen()->DoDeferredStackCheck(instr_);
5495 }
5496 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5497 private:
5498 LStackCheck* instr_;
5499 };
5500
5501 ASSERT(instr->HasEnvironment());
5502 LEnvironment* env = instr->environment();
5503 // There is no LLazyBailout instruction for stack-checks. We have to
5504 // prepare for lazy deoptimization explicitly here.
5505 if (instr->hydrogen()->is_function_entry()) {
5506 // Perform stack overflow check.
5507 Label done;
5508 ExternalReference stack_limit =
5509 ExternalReference::address_of_stack_limit(isolate());
5510 __ cmp(esp, Operand::StaticVariable(stack_limit));
5511 __ j(above_equal, &done, Label::kNear);
5512
5513 ASSERT(instr->context()->IsRegister());
5514 ASSERT(ToRegister(instr->context()).is(esi));
5515 CallCode(isolate()->builtins()->StackCheck(),
5516 RelocInfo::CODE_TARGET,
5517 instr);
5518 __ bind(&done);
5519 } else {
5520 ASSERT(instr->hydrogen()->is_backwards_branch());
5521 // Perform stack overflow check if this goto needs it before jumping.
5522 DeferredStackCheck* deferred_stack_check =
5523 new(zone()) DeferredStackCheck(this, instr, x87_stack_);
5524 ExternalReference stack_limit =
5525 ExternalReference::address_of_stack_limit(isolate());
5526 __ cmp(esp, Operand::StaticVariable(stack_limit));
5527 __ j(below, deferred_stack_check->entry());
5528 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5529 __ bind(instr->done_label());
5530 deferred_stack_check->SetExit(instr->done_label());
5531 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5532 // Don't record a deoptimization index for the safepoint here.
5533 // This will be done explicitly when emitting call and the safepoint in
5534 // the deferred code.
5535 }
5536 }
5537
5538
5539 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5540 // This is a pseudo-instruction that ensures that the environment here is
5541 // properly registered for deoptimization and records the assembler's PC
5542 // offset.
5543 LEnvironment* environment = instr->environment();
5544
5545 // If the environment were already registered, we would have no way of
5546 // backpatching it with the spill slot operands.
5547 ASSERT(!environment->HasBeenRegistered());
5548 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5549
5550 GenerateOsrPrologue();
5551 }
5552
5553
5554 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5555 ASSERT(ToRegister(instr->context()).is(esi));
5556 __ cmp(eax, isolate()->factory()->undefined_value());
5557 DeoptimizeIf(equal, instr->environment());
5558
5559 __ cmp(eax, isolate()->factory()->null_value());
5560 DeoptimizeIf(equal, instr->environment());
5561
5562 __ test(eax, Immediate(kSmiTagMask));
5563 DeoptimizeIf(zero, instr->environment());
5564
5565 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5566 __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
5567 DeoptimizeIf(below_equal, instr->environment());
5568
5569 Label use_cache, call_runtime;
5570 __ CheckEnumCache(&call_runtime);
5571
5572 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
5573 __ jmp(&use_cache, Label::kNear);
5574
5575 // Get the set of properties to enumerate.
5576 __ bind(&call_runtime);
5577 __ push(eax);
5578 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5579
5580 __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
5581 isolate()->factory()->meta_map());
5582 DeoptimizeIf(not_equal, instr->environment());
5583 __ bind(&use_cache);
5584 }
5585
5586
5587 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5588 Register map = ToRegister(instr->map());
5589 Register result = ToRegister(instr->result());
5590 Label load_cache, done;
5591 __ EnumLength(result, map);
5592 __ cmp(result, Immediate(Smi::FromInt(0)));
5593 __ j(not_equal, &load_cache, Label::kNear);
5594 __ mov(result, isolate()->factory()->empty_fixed_array());
5595 __ jmp(&done, Label::kNear);
5596
5597 __ bind(&load_cache);
5598 __ LoadInstanceDescriptors(map, result);
5599 __ mov(result,
5600 FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5601 __ mov(result,
5602 FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5603 __ bind(&done);
5604 __ test(result, result);
5605 DeoptimizeIf(equal, instr->environment());
5606 }
5607
5608
5609 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5610 Register object = ToRegister(instr->value());
5611 __ cmp(ToRegister(instr->map()),
5612 FieldOperand(object, HeapObject::kMapOffset));
5613 DeoptimizeIf(not_equal, instr->environment());
5614 }
5615
5616
5617 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5618 Register object,
5619 Register index) {
5620 PushSafepointRegistersScope scope(this);
5621 __ push(object);
5622 __ push(index);
5623 __ xor_(esi, esi);
5624 __ CallRuntime(Runtime::kLoadMutableDouble);
5625 RecordSafepointWithRegisters(
5626 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5627 __ StoreToSafepointRegisterSlot(object, eax);
5628 }
5629
5630
5631 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5632 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
5633 public:
5634 DeferredLoadMutableDouble(LCodeGen* codegen,
5635 LLoadFieldByIndex* instr,
5636 Register object,
5637 Register index,
5638 const X87Stack& x87_stack)
5639 : LDeferredCode(codegen, x87_stack),
5640 instr_(instr),
5641 object_(object),
5642 index_(index) {
5643 }
5644 virtual void Generate() V8_OVERRIDE {
5645 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5646 }
5647 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5648 private:
5649 LLoadFieldByIndex* instr_;
5650 Register object_;
5651 Register index_;
5652 };
5653
5654 Register object = ToRegister(instr->object());
5655 Register index = ToRegister(instr->index());
5656
5657 DeferredLoadMutableDouble* deferred;
5658 deferred = new(zone()) DeferredLoadMutableDouble(
5659 this, instr, object, index, x87_stack_);
5660
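  // The Smi-encoded index packs two extra pieces of information: the low
  // payload bit selects the deferred path for fields holding a mutable
  // HeapNumber, and (after the shift below) a negative value selects the
  // out-of-object property backing store.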
5661 Label out_of_object, done;
5662 __ test(index, Immediate(Smi::FromInt(1)));
5663 __ j(not_zero, deferred->entry());
5664
5665 __ sar(index, 1);
5666
5667 __ cmp(index, Immediate(0));
5668 __ j(less, &out_of_object, Label::kNear);
5669 __ mov(object, FieldOperand(object,
5670 index,
5671 times_half_pointer_size,
5672 JSObject::kHeaderSize));
5673 __ jmp(&done, Label::kNear);
5674
5675 __ bind(&out_of_object);
5676 __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
5677 __ neg(index);
5678   // Index is now equal to the out-of-object property index plus 1.
5679 __ mov(object, FieldOperand(object,
5680 index,
5681 times_half_pointer_size,
5682 FixedArray::kHeaderSize - kPointerSize));
5683 __ bind(deferred->exit());
5684 __ bind(&done);
5685 }
5686
5687
5688 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5689 Register context = ToRegister(instr->context());
5690 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
5691 }
5692
5693
5694 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5695 Handle<ScopeInfo> scope_info = instr->scope_info();
5696 __ Push(scope_info);
5697 __ push(ToRegister(instr->function()));
5698 CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
5699 RecordSafepoint(Safepoint::kNoLazyDeopt);
5700 }
5701
5702
5703 #undef __
5704
5705 } } // namespace v8::internal
5706
5707 #endif // V8_TARGET_ARCH_X87
5708