// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/code-stubs.h"
#include "src/stub-cache.h"
#include "src/hydrogen-osr.h"

namespace v8 {
namespace internal {


class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
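// Note: a SafepointGenerator is passed wherever a CallWrapper is expected
// (for example, to the macro assembler's invoke helpers), so that AfterCall()
// records a safepoint immediately after the generated call instruction.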


#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};
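// EmitBranchGeneric() (further down in this file) decides whether to call
// Emit() or EmitInverted() based on which successor block is the fall-through
// block, so at most one branch instruction is emitted per control-flow split
// where possible.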


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
    : BranchGenerator(codegen),
      cond_(cond),
      lhs_(lhs),
      rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
    : BranchGenerator(codegen),
      cond_(cond),
      value_(value),
      mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};
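// Tbz/Tbnz can only test a single bit, so the TestAndBranchIf* helpers are
// expected to fall back to a Tst followed by a conditional branch when the
// mask has more than one bit set; for other conditions the Tst + B sequence
// above is emitted directly.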


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
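    // Only the 'greater' outcome satisfies 'gt', so both zero and NaN
    // (unordered) fall through the branch below.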
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);

    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).Is(x1));
  ASSERT(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  after_push_argument_ = false;
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  after_push_argument_ = false;

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
  after_push_argument_ = false;

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  ASSERT(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }
  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code, because
  // deferred code generation is the last step that generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  Label needs_frame, restore_caller_doubles, call_deopt_entry;

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = deopt_jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = deopt_jump_table_.length();
    for (int i = 0; i < length; i++) {
      __ Bind(&deopt_jump_table_[i]->label);

      Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
      Address entry = deopt_jump_table_[i]->address;
      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
      if (id == Deoptimizer::kNotDeoptimizationEntry) {
        Comment(";;; jump table entry %d.", i);
      } else {
        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
      }

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      // The last entry can fall through into `call_deopt_entry`, avoiding a
      // branch.
      bool last_entry = (i + 1) == length;

      if (deopt_jump_table_[i]->needs_frame) {
        ASSERT(!info()->saves_caller_doubles());
        if (!needs_frame.is_bound()) {
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          ASSERT(info()->IsStub());

          UseScratchRegisterScope temps(masm());
          Register stub_marker = temps.AcquireX();
          __ Bind(&needs_frame);
          __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
          __ Push(lr, fp, cp, stub_marker);
          __ Add(fp, __ StackPointer(), 2 * kPointerSize);
          if (!last_entry) __ B(&call_deopt_entry);
        } else {
          // Reuse the existing needs_frame code.
          __ B(&needs_frame);
        }
      } else if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        if (!restore_caller_doubles.is_bound()) {
          __ Bind(&restore_caller_doubles);
          RestoreCallerDoubles();
          if (!last_entry) __ B(&call_deopt_entry);
        } else {
          // Reuse the existing restore_caller_doubles code.
          __ B(&restore_caller_doubles);
        }
      } else {
        // There is nothing special to do, so just continue to the second-level
        // table.
        if (!last_entry) __ B(&call_deopt_entry);
      }

      masm()->CheckConstPool(false, last_entry);
    }

    // Generate common code for calling the second-level deopt table.
    Register deopt_entry = temps.AcquireX();
    __ Bind(&call_deopt_entry);
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Call(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::DeoptimizeBranch(
    LEnvironment* environment,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
    info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  ASSERT(environment->HasBeenRegistered());
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last()->address != entry) ||
        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry* table_entry =
        new(zone()) Deoptimizer::JumpTableEntry(entry,
                                                bailout_type,
                                                !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ B(&deopt_jump_table_.last()->label,
         branch_type, reg, bit);
  }
}


void LCodeGen::Deoptimize(LEnvironment* environment,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, environment);
}


void LCodeGen::DeoptimizeIfSmi(Register rt,
                               LEnvironment* environment) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfRoot(Register rt,
                                Heap::RootListIndex index,
                                LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, environment);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt,
                                   Heap::RootListIndex index,
                                   LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, environment);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
                                     LEnvironment* environment) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, environment);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt,
                                  int bit,
                                  LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt,
                                    int bit,
                                    LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  ASSERT((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32I(LOperand* op) {
  return ToOperand32(op, SIGNED_INT32);
}


Operand LCodeGen::ToOperand32U(LOperand* op) {
  return ToOperand32(op, UNSIGNED_INT32);
}


Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
  ASSERT(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return (signedness == SIGNED_INT32)
          ? Operand(constant->Integer32Value())
          : Operand(static_cast<uint32_t>(constant->Integer32Value()));
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}
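// A worked example of the mapping above: index == -1 yields offset 0 (the
// slot at the stack pointer), index == -2 yields kPointerSize, and so on.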


MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  ASSERT(op != NULL);
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    if (op->index() >= 0) {
      // Loads and stores have a bigger reach with positive offsets than with
      // negative ones. When the load or the store can't be done in one
      // instruction via fp (the negative offset is too large), we try to
      // access the slot via jssp (positive offset). We can reference a stack
      // slot from jssp only if jssp points at the end of the stack slots.
      // That is not the case when:
      //  - stack_mode != kCanUseStackPointer: deferred code has saved the
      //     registers.
      //  - after_push_argument_: arguments have been pushed for a call.
      //  - inlined_arguments_: inlined arguments have been pushed once, so the
      //     rest of the function can no longer trust jssp.
      //  - saves_caller_doubles: some double registers have been pushed, so
      //     jssp references the end of the double registers rather than the
      //     end of the stack slots.
      // Also, if the offset from fp is small enough for a single-instruction
      // load/store, we use an fp access.
1299       if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
1300           !inlined_arguments_ && !is_int9(fp_offset) &&
1301           !info()->saves_caller_doubles()) {
1302         int jssp_offset =
1303             (GetStackSlotCount() - op->index() - 1) * kPointerSize;
1304         return MemOperand(masm()->StackPointer(), jssp_offset);
1305       }
1306     }
1307     return MemOperand(fp, fp_offset);
1308   } else {
1309     // Retrieve parameter without eager stack-frame relative to the
1310     // stack-pointer.
1311     return MemOperand(masm()->StackPointer(),
1312                       ArgumentsOffsetWithoutFrame(op->index()));
1313   }
1314 }
1315 
1316 
ToHandle(LConstantOperand * op) const1317 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
1318   HConstant* constant = chunk_->LookupConstant(op);
1319   ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
1320   return constant->handle(isolate());
1321 }
1322 
1323 
1324 template<class LI>
ToShiftedRightOperand32(LOperand * right,LI * shift_info,IntegerSignedness signedness)1325 Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
1326                                           IntegerSignedness signedness) {
1327   if (shift_info->shift() == NO_SHIFT) {
1328     return (signedness == SIGNED_INT32) ? ToOperand32I(right)
1329                                         : ToOperand32U(right);
1330   } else {
1331     return Operand(
1332         ToRegister32(right),
1333         shift_info->shift(),
1334         JSShiftAmountFromLConstant(shift_info->shift_amount()));
1335   }
1336 }
1337 
1338 
IsSmi(LConstantOperand * op) const1339 bool LCodeGen::IsSmi(LConstantOperand* op) const {
1340   return chunk_->LookupLiteralRepresentation(op).IsSmi();
1341 }
1342 
1343 
IsInteger32Constant(LConstantOperand * op) const1344 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
1345   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
1346 }
1347 
1348 
ToInteger32(LConstantOperand * op) const1349 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
1350   HConstant* constant = chunk_->LookupConstant(op);
1351   return constant->Integer32Value();
1352 }
1353 
1354 
ToDouble(LConstantOperand * op) const1355 double LCodeGen::ToDouble(LConstantOperand* op) const {
1356   HConstant* constant = chunk_->LookupConstant(op);
1357   ASSERT(constant->HasDoubleValue());
1358   return constant->DoubleValue();
1359 }
1360 
1361 
TokenToCondition(Token::Value op,bool is_unsigned)1362 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1363   Condition cond = nv;
1364   switch (op) {
1365     case Token::EQ:
1366     case Token::EQ_STRICT:
1367       cond = eq;
1368       break;
1369     case Token::NE:
1370     case Token::NE_STRICT:
1371       cond = ne;
1372       break;
1373     case Token::LT:
1374       cond = is_unsigned ? lo : lt;
1375       break;
1376     case Token::GT:
1377       cond = is_unsigned ? hi : gt;
1378       break;
1379     case Token::LTE:
1380       cond = is_unsigned ? ls : le;
1381       break;
1382     case Token::GTE:
1383       cond = is_unsigned ? hs : ge;
1384       break;
1385     case Token::IN:
1386     case Token::INSTANCEOF:
1387     default:
1388       UNREACHABLE();
1389   }
1390   return cond;
1391 }
1392 
1393 
1394 template<class InstrType>
EmitBranchGeneric(InstrType instr,const BranchGenerator & branch)1395 void LCodeGen::EmitBranchGeneric(InstrType instr,
1396                                  const BranchGenerator& branch) {
1397   int left_block = instr->TrueDestination(chunk_);
1398   int right_block = instr->FalseDestination(chunk_);
1399 
1400   int next_block = GetNextEmittedBlock();
1401 
1402   if (right_block == left_block) {
1403     EmitGoto(left_block);
1404   } else if (left_block == next_block) {
1405     branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
1406   } else if (right_block == next_block) {
1407     branch.Emit(chunk_->GetAssemblyLabel(left_block));
1408   } else {
1409     branch.Emit(chunk_->GetAssemblyLabel(left_block));
1410     __ B(chunk_->GetAssemblyLabel(right_block));
1411   }
1412 }
1413 
1414 
1415 template<class InstrType>
EmitBranch(InstrType instr,Condition condition)1416 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1417   ASSERT((condition != al) && (condition != nv));
1418   BranchOnCondition branch(this, condition);
1419   EmitBranchGeneric(instr, branch);
1420 }
1421 
1422 
1423 template<class InstrType>
EmitCompareAndBranch(InstrType instr,Condition condition,const Register & lhs,const Operand & rhs)1424 void LCodeGen::EmitCompareAndBranch(InstrType instr,
1425                                     Condition condition,
1426                                     const Register& lhs,
1427                                     const Operand& rhs) {
1428   ASSERT((condition != al) && (condition != nv));
1429   CompareAndBranch branch(this, condition, lhs, rhs);
1430   EmitBranchGeneric(instr, branch);
1431 }
1432 
1433 
1434 template<class InstrType>
1435 void LCodeGen::EmitTestAndBranch(InstrType instr,
1436                                  Condition condition,
1437                                  const Register& value,
1438                                  uint64_t mask) {
1439   ASSERT((condition != al) && (condition != nv));
1440   TestAndBranch branch(this, condition, value, mask);
1441   EmitBranchGeneric(instr, branch);
1442 }
1443 
1444 
1445 template<class InstrType>
1446 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
1447                                          const FPRegister& value,
1448                                          const FPRegister& scratch) {
1449   BranchIfNonZeroNumber branch(this, value, scratch);
1450   EmitBranchGeneric(instr, branch);
1451 }
1452 
1453 
1454 template<class InstrType>
1455 void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
1456                                       const Register& value) {
1457   BranchIfHeapNumber branch(this, value);
1458   EmitBranchGeneric(instr, branch);
1459 }
1460 
1461 
1462 template<class InstrType>
1463 void LCodeGen::EmitBranchIfRoot(InstrType instr,
1464                                 const Register& value,
1465                                 Heap::RootListIndex index) {
1466   BranchIfRoot branch(this, value, index);
1467   EmitBranchGeneric(instr, branch);
1468 }
1469 
1470 
1471 void LCodeGen::DoGap(LGap* gap) {
1472   for (int i = LGap::FIRST_INNER_POSITION;
1473        i <= LGap::LAST_INNER_POSITION;
1474        i++) {
1475     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1476     LParallelMove* move = gap->GetParallelMove(inner_pos);
1477     if (move != NULL) {
1478       resolver_.Resolve(move);
1479     }
1480   }
1481 }
1482 
1483 
1484 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1485   Register arguments = ToRegister(instr->arguments());
1486   Register result = ToRegister(instr->result());
1487 
1488   // The pointer to the arguments array comes from DoArgumentsElements.
1489   // It does not point directly to the arguments and there is an offset of
1490   // two words that we must take into account when accessing an argument.
1491   // Subtracting the index from length accounts for one, so we add one more.
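  // For example, with a constant length of 3 and a constant index of 0, the
  // offset below is (3 - 0 + 1) * kPointerSize, i.e. four words above the
  // arguments pointer.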
1492 
1493   if (instr->length()->IsConstantOperand() &&
1494       instr->index()->IsConstantOperand()) {
1495     int index = ToInteger32(LConstantOperand::cast(instr->index()));
1496     int length = ToInteger32(LConstantOperand::cast(instr->length()));
1497     int offset = ((length - index) + 1) * kPointerSize;
1498     __ Ldr(result, MemOperand(arguments, offset));
1499   } else if (instr->index()->IsConstantOperand()) {
1500     Register length = ToRegister32(instr->length());
1501     int index = ToInteger32(LConstantOperand::cast(instr->index()));
1502     int loc = index - 1;
1503     if (loc != 0) {
1504       __ Sub(result.W(), length, loc);
1505       __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1506     } else {
1507       __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
1508     }
1509   } else {
1510     Register length = ToRegister32(instr->length());
1511     Operand index = ToOperand32I(instr->index());
1512     __ Sub(result.W(), length, index);
1513     __ Add(result.W(), result.W(), 1);
1514     __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1515   }
1516 }
1517 
1518 
1519 void LCodeGen::DoAddE(LAddE* instr) {
1520   Register result = ToRegister(instr->result());
1521   Register left = ToRegister(instr->left());
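  // The right operand is either an untagged int32 constant or a W register
  // that is sign-extended (SXTW) to 64 bits for the addition.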
1522   Operand right = (instr->right()->IsConstantOperand())
1523       ? ToInteger32(LConstantOperand::cast(instr->right()))
1524       : Operand(ToRegister32(instr->right()), SXTW);
1525 
1526   ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1527   __ Add(result, left, right);
1528 }
1529 
1530 
1531 void LCodeGen::DoAddI(LAddI* instr) {
1532   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1533   Register result = ToRegister32(instr->result());
1534   Register left = ToRegister32(instr->left());
1535   Operand right = ToShiftedRightOperand32I(instr->right(), instr);
1536 
1537   if (can_overflow) {
1538     __ Adds(result, left, right);
1539     DeoptimizeIf(vs, instr->environment());
1540   } else {
1541     __ Add(result, left, right);
1542   }
1543 }
1544 
1545 
1546 void LCodeGen::DoAddS(LAddS* instr) {
1547   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1548   Register result = ToRegister(instr->result());
1549   Register left = ToRegister(instr->left());
1550   Operand right = ToOperand(instr->right());
1551   if (can_overflow) {
1552     __ Adds(result, left, right);
1553     DeoptimizeIf(vs, instr->environment());
1554   } else {
1555     __ Add(result, left, right);
1556   }
1557 }
1558 
1559 
1560 void LCodeGen::DoAllocate(LAllocate* instr) {
1561   class DeferredAllocate: public LDeferredCode {
1562    public:
1563     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
1564         : LDeferredCode(codegen), instr_(instr) { }
1565     virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
1566     virtual LInstruction* instr() { return instr_; }
1567    private:
1568     LAllocate* instr_;
1569   };
1570 
1571   DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
1572 
1573   Register result = ToRegister(instr->result());
1574   Register temp1 = ToRegister(instr->temp1());
1575   Register temp2 = ToRegister(instr->temp2());
1576 
1577   // Allocate memory for the object.
1578   AllocationFlags flags = TAG_OBJECT;
1579   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
1580     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
1581   }
1582 
1583   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1584     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
1585     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1586     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
1587   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1588     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1589     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
1590   }
1591 
1592   if (instr->size()->IsConstantOperand()) {
1593     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1594     if (size <= Page::kMaxRegularHeapObjectSize) {
1595       __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1596     } else {
1597       __ B(deferred->entry());
1598     }
1599   } else {
1600     Register size = ToRegister32(instr->size());
1601     __ Sxtw(size.X(), size);
1602     __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
1603   }
1604 
1605   __ Bind(deferred->exit());
1606 
1607   if (instr->hydrogen()->MustPrefillWithFiller()) {
1608     Register filler_count = temp1;
1609     Register filler = temp2;
1610     Register untagged_result = ToRegister(instr->temp3());
1611 
1612     if (instr->size()->IsConstantOperand()) {
1613       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1614       __ Mov(filler_count, size / kPointerSize);
1615     } else {
1616       __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
1617     }
1618 
1619     __ Sub(untagged_result, result, kHeapObjectTag);
1620     __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
1621     __ FillFields(untagged_result, filler_count, filler);
1622   } else {
1623     ASSERT(instr->temp3() == NULL);
1624   }
1625 }
1626 
1627 
1628 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
1629   // TODO(3095996): Get rid of this. For now, we need to make the
1630   // result register contain a valid pointer because it is already
1631   // contained in the register pointer map.
1632   __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
1633 
1634   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
1635   // We're in a SafepointRegistersScope so we can use any scratch registers.
1636   Register size = x0;
1637   if (instr->size()->IsConstantOperand()) {
1638     __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
1639   } else {
1640     __ SmiTag(size, ToRegister32(instr->size()).X());
1641   }
1642   int flags = AllocateDoubleAlignFlag::encode(
1643       instr->hydrogen()->MustAllocateDoubleAligned());
1644   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1645     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
1646     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1647     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
1648   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1649     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1650     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
1651   } else {
1652     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
1653   }
1654   __ Mov(x10, Smi::FromInt(flags));
1655   __ Push(size, x10);
1656 
1657   CallRuntimeFromDeferred(
1658       Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
1659   __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
1660 }
1661 
1662 
1663 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1664   Register receiver = ToRegister(instr->receiver());
1665   Register function = ToRegister(instr->function());
1666   Register length = ToRegister32(instr->length());
1667 
1668   Register elements = ToRegister(instr->elements());
1669   Register scratch = x5;
1670   ASSERT(receiver.Is(x0));  // Used for parameter count.
1671   ASSERT(function.Is(x1));  // Required by InvokeFunction.
1672   ASSERT(ToRegister(instr->result()).Is(x0));
1673   ASSERT(instr->IsMarkedAsCall());
1674 
1675   // Copy the arguments to this function possibly from the
1676   // adaptor frame below it.
1677   const uint32_t kArgumentsLimit = 1 * KB;
1678   __ Cmp(length, kArgumentsLimit);
1679   DeoptimizeIf(hi, instr->environment());
1680 
1681   // Push the receiver and use the register to keep the original
1682   // number of arguments.
1683   __ Push(receiver);
1684   Register argc = receiver;
1685   receiver = NoReg;
1686   __ Sxtw(argc, length);
1687   // The arguments are at a one pointer size offset from elements.
1688   __ Add(elements, elements, 1 * kPointerSize);
1689 
1690   // Loop through the arguments pushing them onto the execution
1691   // stack.
1692   Label invoke, loop;
1693   // length is a small non-negative integer, due to the test above.
1694   __ Cbz(length, &invoke);
1695   __ Bind(&loop);
1696   __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
1697   __ Push(scratch);
1698   __ Subs(length, length, 1);
1699   __ B(ne, &loop);
1700 
1701   __ Bind(&invoke);
1702   ASSERT(instr->HasPointerMap());
1703   LPointerMap* pointers = instr->pointer_map();
1704   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
1705   // The number of arguments is stored in argc (receiver) which is x0, as
1706   // expected by InvokeFunction.
1707   ParameterCount actual(argc);
1708   __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
1709 }
1710 
1711 
1712 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1713   // We push some arguments and they will be popped in another block. We can't
1714   // trust that jssp references the end of the stack slots until the end of
1715   // the function.
1716   inlined_arguments_ = true;
1717   Register result = ToRegister(instr->result());
1718 
1719   if (instr->hydrogen()->from_inlined()) {
1720     // When we are inside an inlined function, the arguments are the last things
1721     // that have been pushed on the stack. Therefore the arguments array can be
1722     // accessed directly from jssp.
1723     // However in the normal case, it is accessed via fp but there are two words
1724     // on the stack between fp and the arguments (the saved lr and fp) and the
1725     // LAccessArgumentsAt implementation takes that into account.
1726     // In the inlined case we need to subtract the size of 2 words from jssp to
1727     // get a pointer which will work well with LAccessArgumentsAt.
1728     ASSERT(masm()->StackPointer().Is(jssp));
1729     __ Sub(result, jssp, 2 * kPointerSize);
1730   } else {
1731     ASSERT(instr->temp() != NULL);
1732     Register previous_fp = ToRegister(instr->temp());
1733 
1734     __ Ldr(previous_fp,
1735            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1736     __ Ldr(result,
1737            MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
1738     __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1739     __ Csel(result, fp, previous_fp, ne);
1740   }
1741 }
1742 
1743 
1744 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1745   Register elements = ToRegister(instr->elements());
1746   Register result = ToRegister32(instr->result());
1747   Label done;
1748 
1749   // If there is no arguments adaptor frame, the number of arguments is fixed.
1750   __ Cmp(fp, elements);
1751   __ Mov(result, scope()->num_parameters());
1752   __ B(eq, &done);
1753 
1754   // Arguments adaptor frame present. Get argument length from there.
1755   __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1756   __ Ldr(result,
1757          UntagSmiMemOperand(result.X(),
1758                             ArgumentsAdaptorFrameConstants::kLengthOffset));
1759 
1760   // Argument length is in result register.
1761   __ Bind(&done);
1762 }
1763 
1764 
1765 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1766   DoubleRegister left = ToDoubleRegister(instr->left());
1767   DoubleRegister right = ToDoubleRegister(instr->right());
1768   DoubleRegister result = ToDoubleRegister(instr->result());
1769 
1770   switch (instr->op()) {
1771     case Token::ADD: __ Fadd(result, left, right); break;
1772     case Token::SUB: __ Fsub(result, left, right); break;
1773     case Token::MUL: __ Fmul(result, left, right); break;
1774     case Token::DIV: __ Fdiv(result, left, right); break;
1775     case Token::MOD: {
1776       // The ECMA-262 remainder operator is the remainder from a truncating
1777       // (round-towards-zero) division. Note that this differs from IEEE-754.
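      // For example, 5 % 3 is 2 with truncating division, whereas the
      // IEEE-754 remainder (which rounds the quotient to nearest) gives -1.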
1778       //
1779       // TODO(jbramley): See if it's possible to do this inline, rather than by
1780       // calling a helper function. With frintz (to produce the intermediate
1781       // quotient) and fmsub (to calculate the remainder without loss of
1782       // precision), it should be possible. However, we would need support for
1783       // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
1784       // support that yet.
1785       ASSERT(left.Is(d0));
1786       ASSERT(right.Is(d1));
1787       __ CallCFunction(
1788           ExternalReference::mod_two_doubles_operation(isolate()),
1789           0, 2);
1790       ASSERT(result.Is(d0));
1791       break;
1792     }
1793     default:
1794       UNREACHABLE();
1795       break;
1796   }
1797 }
1798 
1799 
1800 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1801   ASSERT(ToRegister(instr->context()).is(cp));
1802   ASSERT(ToRegister(instr->left()).is(x1));
1803   ASSERT(ToRegister(instr->right()).is(x0));
1804   ASSERT(ToRegister(instr->result()).is(x0));
1805 
1806   BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
1807   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1808 }
1809 
1810 
1811 void LCodeGen::DoBitI(LBitI* instr) {
1812   Register result = ToRegister32(instr->result());
1813   Register left = ToRegister32(instr->left());
1814   Operand right = ToShiftedRightOperand32U(instr->right(), instr);
1815 
1816   switch (instr->op()) {
1817     case Token::BIT_AND: __ And(result, left, right); break;
1818     case Token::BIT_OR:  __ Orr(result, left, right); break;
1819     case Token::BIT_XOR: __ Eor(result, left, right); break;
1820     default:
1821       UNREACHABLE();
1822       break;
1823   }
1824 }
1825 
1826 
1827 void LCodeGen::DoBitS(LBitS* instr) {
1828   Register result = ToRegister(instr->result());
1829   Register left = ToRegister(instr->left());
1830   Operand right = ToOperand(instr->right());
1831 
1832   switch (instr->op()) {
1833     case Token::BIT_AND: __ And(result, left, right); break;
1834     case Token::BIT_OR:  __ Orr(result, left, right); break;
1835     case Token::BIT_XOR: __ Eor(result, left, right); break;
1836     default:
1837       UNREACHABLE();
1838       break;
1839   }
1840 }
1841 
1842 
1843 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
1844   Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
1845   ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
1846   ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
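  // A constant index must be the immediate (second) operand of Cmp, so in
  // that case compare length against the index and commute the condition.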
1847   if (instr->index()->IsConstantOperand()) {
1848     Operand index = ToOperand32I(instr->index());
1849     Register length = ToRegister32(instr->length());
1850     __ Cmp(length, index);
1851     cond = CommuteCondition(cond);
1852   } else {
1853     Register index = ToRegister32(instr->index());
1854     Operand length = ToOperand32I(instr->length());
1855     __ Cmp(index, length);
1856   }
1857   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
1858     __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
1859   } else {
1860     DeoptimizeIf(cond, instr->environment());
1861   }
1862 }
1863 
1864 
1865 void LCodeGen::DoBranch(LBranch* instr) {
1866   Representation r = instr->hydrogen()->value()->representation();
1867   Label* true_label = instr->TrueLabel(chunk_);
1868   Label* false_label = instr->FalseLabel(chunk_);
1869 
1870   if (r.IsInteger32()) {
1871     ASSERT(!info()->IsStub());
1872     EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1873   } else if (r.IsSmi()) {
1874     ASSERT(!info()->IsStub());
1875     STATIC_ASSERT(kSmiTag == 0);
1876     EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1877   } else if (r.IsDouble()) {
1878     DoubleRegister value = ToDoubleRegister(instr->value());
1879     // Test the double value. Zero and NaN are false.
1880     EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1881   } else {
1882     ASSERT(r.IsTagged());
1883     Register value = ToRegister(instr->value());
1884     HType type = instr->hydrogen()->value()->type();
1885 
1886     if (type.IsBoolean()) {
1887       ASSERT(!info()->IsStub());
1888       __ CompareRoot(value, Heap::kTrueValueRootIndex);
1889       EmitBranch(instr, eq);
1890     } else if (type.IsSmi()) {
1891       ASSERT(!info()->IsStub());
1892       EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1893     } else if (type.IsJSArray()) {
1894       ASSERT(!info()->IsStub());
1895       EmitGoto(instr->TrueDestination(chunk()));
1896     } else if (type.IsHeapNumber()) {
1897       ASSERT(!info()->IsStub());
1898       __ Ldr(double_scratch(), FieldMemOperand(value,
1899                                                HeapNumber::kValueOffset));
1900       // Test the double value. Zero and NaN are false.
1901       EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1902     } else if (type.IsString()) {
1903       ASSERT(!info()->IsStub());
1904       Register temp = ToRegister(instr->temp1());
1905       __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1906       EmitCompareAndBranch(instr, ne, temp, 0);
1907     } else {
1908       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1909       // Avoid deopts in the case where we've never executed this path before.
1910       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1911 
1912       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1913         // undefined -> false.
1914         __ JumpIfRoot(
1915             value, Heap::kUndefinedValueRootIndex, false_label);
1916       }
1917 
1918       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1919         // Boolean -> its value.
1920         __ JumpIfRoot(
1921             value, Heap::kTrueValueRootIndex, true_label);
1922         __ JumpIfRoot(
1923             value, Heap::kFalseValueRootIndex, false_label);
1924       }
1925 
1926       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1927         // 'null' -> false.
1928         __ JumpIfRoot(
1929             value, Heap::kNullValueRootIndex, false_label);
1930       }
1931 
1932       if (expected.Contains(ToBooleanStub::SMI)) {
1933         // Smis: 0 -> false, all others -> true.
1934         ASSERT(Smi::FromInt(0) == 0);
1935         __ Cbz(value, false_label);
1936         __ JumpIfSmi(value, true_label);
1937       } else if (expected.NeedsMap()) {
1938         // If we need a map later and have a smi, deopt.
1939         DeoptimizeIfSmi(value, instr->environment());
1940       }
1941 
1942       Register map = NoReg;
1943       Register scratch = NoReg;
1944 
1945       if (expected.NeedsMap()) {
1946         ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
1947         map = ToRegister(instr->temp1());
1948         scratch = ToRegister(instr->temp2());
1949 
1950         __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1951 
1952         if (expected.CanBeUndetectable()) {
1953           // Undetectable -> false.
1954           __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1955           __ TestAndBranchIfAnySet(
1956               scratch, 1 << Map::kIsUndetectable, false_label);
1957         }
1958       }
1959 
1960       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1961         // spec object -> true.
1962         __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1963         __ B(ge, true_label);
1964       }
1965 
1966       if (expected.Contains(ToBooleanStub::STRING)) {
1967         // String value -> false iff empty.
1968         Label not_string;
1969         __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1970         __ B(ge, &not_string);
1971         __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1972         __ Cbz(scratch, false_label);
1973         __ B(true_label);
1974         __ Bind(&not_string);
1975       }
1976 
1977       if (expected.Contains(ToBooleanStub::SYMBOL)) {
1978         // Symbol value -> true.
1979         __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1980         __ B(eq, true_label);
1981       }
1982 
1983       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1984         Label not_heap_number;
1985         __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1986 
1987         __ Ldr(double_scratch(),
1988                FieldMemOperand(value, HeapNumber::kValueOffset));
1989         __ Fcmp(double_scratch(), 0.0);
1990         // If we got a NaN (overflow bit is set), jump to the false branch.
1991         __ B(vs, false_label);
1992         __ B(eq, false_label);
1993         __ B(true_label);
1994         __ Bind(&not_heap_number);
1995       }
1996 
1997       if (!expected.IsGeneric()) {
1998         // We've seen something for the first time -> deopt.
1999         // This can only happen if we are not generic already.
2000         Deoptimize(instr->environment());
2001       }
2002     }
2003   }
2004 }
2005 
2006 
2007 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2008                                  int formal_parameter_count,
2009                                  int arity,
2010                                  LInstruction* instr,
2011                                  Register function_reg) {
2012   bool dont_adapt_arguments =
2013       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2014   bool can_invoke_directly =
2015       dont_adapt_arguments || formal_parameter_count == arity;
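  // A direct call is possible when no arguments adaptor frame is needed:
  // either the callee opted out of adaptation or the call site already passes
  // the expected number of arguments.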
2016 
2017   // The function interface relies on the following register assignments.
2018   ASSERT(function_reg.Is(x1) || function_reg.IsNone());
2019   Register arity_reg = x0;
2020 
2021   LPointerMap* pointers = instr->pointer_map();
2022 
2023   // If necessary, load the function object.
2024   if (function_reg.IsNone()) {
2025     function_reg = x1;
2026     __ LoadObject(function_reg, function);
2027   }
2028 
2029   if (FLAG_debug_code) {
2030     Label is_not_smi;
2031     // Try to confirm that function_reg (x1) is a tagged pointer.
2032     __ JumpIfNotSmi(function_reg, &is_not_smi);
2033     __ Abort(kExpectedFunctionObject);
2034     __ Bind(&is_not_smi);
2035   }
2036 
2037   if (can_invoke_directly) {
2038     // Change context.
2039     __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
2040 
2041     // Set the arguments count if adaptation is not needed. Assumes that x0 is
2042     // available to write to at this point.
2043     if (dont_adapt_arguments) {
2044       __ Mov(arity_reg, arity);
2045     }
2046 
2047     // Invoke function.
2048     __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
2049     __ Call(x10);
2050 
2051     // Set up deoptimization.
2052     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2053   } else {
2054     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2055     ParameterCount count(arity);
2056     ParameterCount expected(formal_parameter_count);
2057     __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2058   }
2059 }
2060 
2061 
2062 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2063   ASSERT(instr->IsMarkedAsCall());
2064   ASSERT(ToRegister(instr->result()).Is(x0));
2065 
2066   LPointerMap* pointers = instr->pointer_map();
2067   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2068 
2069   if (instr->target()->IsConstantOperand()) {
2070     LConstantOperand* target = LConstantOperand::cast(instr->target());
2071     Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2072     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2073     // TODO(all): on ARM we use a call descriptor to specify a storage mode
2074     // but on ARM64 we only have one storage mode so it isn't necessary. Check
2075     // that this understanding is correct.
2076     __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2077   } else {
2078     ASSERT(instr->target()->IsRegister());
2079     Register target = ToRegister(instr->target());
2080     generator.BeforeCall(__ CallSize(target));
2081     __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2082     __ Call(target);
2083   }
2084   generator.AfterCall();
2085   after_push_argument_ = false;
2086 }
2087 
2088 
2089 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2090   ASSERT(instr->IsMarkedAsCall());
2091   ASSERT(ToRegister(instr->function()).is(x1));
2092 
2093   if (instr->hydrogen()->pass_argument_count()) {
2094     __ Mov(x0, Operand(instr->arity()));
2095   }
2096 
2097   // Change context.
2098   __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2099 
2100   // Load the code entry address
2101   __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2102   __ Call(x10);
2103 
2104   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2105   after_push_argument_ = false;
2106 }
2107 
2108 
2109 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2110   CallRuntime(instr->function(), instr->arity(), instr);
2111   after_push_argument_ = false;
2112 }
2113 
2114 
2115 void LCodeGen::DoCallStub(LCallStub* instr) {
2116   ASSERT(ToRegister(instr->context()).is(cp));
2117   ASSERT(ToRegister(instr->result()).is(x0));
2118   switch (instr->hydrogen()->major_key()) {
2119     case CodeStub::RegExpExec: {
2120       RegExpExecStub stub(isolate());
2121       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2122       break;
2123     }
2124     case CodeStub::SubString: {
2125       SubStringStub stub(isolate());
2126       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2127       break;
2128     }
2129     case CodeStub::StringCompare: {
2130       StringCompareStub stub(isolate());
2131       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2132       break;
2133     }
2134     default:
2135       UNREACHABLE();
2136   }
2137   after_push_argument_ = false;
2138 }
2139 
2140 
2141 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2142   GenerateOsrPrologue();
2143 }
2144 
2145 
2146 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2147   Register temp = ToRegister(instr->temp());
2148   {
2149     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2150     __ Push(object);
2151     __ Mov(cp, 0);
2152     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2153     RecordSafepointWithRegisters(
2154         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2155     __ StoreToSafepointRegisterSlot(x0, temp);
2156   }
2157   DeoptimizeIfSmi(temp, instr->environment());
2158 }
2159 
2160 
2161 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2162   class DeferredCheckMaps: public LDeferredCode {
2163    public:
2164     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2165         : LDeferredCode(codegen), instr_(instr), object_(object) {
2166       SetExit(check_maps());
2167     }
2168     virtual void Generate() {
2169       codegen()->DoDeferredInstanceMigration(instr_, object_);
2170     }
2171     Label* check_maps() { return &check_maps_; }
2172     virtual LInstruction* instr() { return instr_; }
2173    private:
2174     LCheckMaps* instr_;
2175     Label check_maps_;
2176     Register object_;
2177   };
2178 
2179   if (instr->hydrogen()->IsStabilityCheck()) {
2180     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2181     for (int i = 0; i < maps->size(); ++i) {
2182       AddStabilityDependency(maps->at(i).handle());
2183     }
2184     return;
2185   }
2186 
2187   Register object = ToRegister(instr->value());
2188   Register map_reg = ToRegister(instr->temp());
2189 
2190   __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2191 
2192   DeferredCheckMaps* deferred = NULL;
2193   if (instr->hydrogen()->HasMigrationTarget()) {
2194     deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2195     __ Bind(deferred->check_maps());
2196   }
2197 
2198   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2199   Label success;
2200   for (int i = 0; i < maps->size() - 1; i++) {
2201     Handle<Map> map = maps->at(i).handle();
2202     __ CompareMap(map_reg, map);
2203     __ B(eq, &success);
2204   }
2205   Handle<Map> map = maps->at(maps->size() - 1).handle();
2206   __ CompareMap(map_reg, map);
2207 
2208   // We didn't match a map.
2209   if (instr->hydrogen()->HasMigrationTarget()) {
2210     __ B(ne, deferred->entry());
2211   } else {
2212     DeoptimizeIf(ne, instr->environment());
2213   }
2214 
2215   __ Bind(&success);
2216 }
2217 
2218 
2219 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2220   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2221     DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
2222   }
2223 }
2224 
2225 
2226 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2227   Register value = ToRegister(instr->value());
2228   ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
2229   DeoptimizeIfNotSmi(value, instr->environment());
2230 }
2231 
2232 
2233 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2234   Register input = ToRegister(instr->value());
2235   Register scratch = ToRegister(instr->temp());
2236 
2237   __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2238   __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2239 
2240   if (instr->hydrogen()->is_interval_check()) {
2241     InstanceType first, last;
2242     instr->hydrogen()->GetCheckInterval(&first, &last);
2243 
2244     __ Cmp(scratch, first);
2245     if (first == last) {
2246       // If there is only one type in the interval, check for equality.
2247       DeoptimizeIf(ne, instr->environment());
2248     } else if (last == LAST_TYPE) {
2249       // We don't need to compare with the higher bound of the interval.
2250       DeoptimizeIf(lo, instr->environment());
2251     } else {
2252       // If we are below the lower bound, set the C flag and clear the Z flag
2253       // to force a deopt.
2254       __ Ccmp(scratch, last, CFlag, hs);
2255       DeoptimizeIf(hi, instr->environment());
2256     }
2257   } else {
2258     uint8_t mask;
2259     uint8_t tag;
2260     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2261 
2262     if (IsPowerOf2(mask)) {
2263       ASSERT((tag == 0) || (tag == mask));
2264       if (tag == 0) {
2265         DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
2266       } else {
2267         DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
2268       }
2269     } else {
2270       if (tag == 0) {
2271         __ Tst(scratch, mask);
2272       } else {
2273         __ And(scratch, scratch, mask);
2274         __ Cmp(scratch, tag);
2275       }
2276       DeoptimizeIf(ne, instr->environment());
2277     }
2278   }
2279 }
2280 
2281 
2282 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2283   DoubleRegister input = ToDoubleRegister(instr->unclamped());
2284   Register result = ToRegister32(instr->result());
2285   __ ClampDoubleToUint8(result, input, double_scratch());
2286 }
2287 
2288 
2289 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2290   Register input = ToRegister32(instr->unclamped());
2291   Register result = ToRegister32(instr->result());
2292   __ ClampInt32ToUint8(result, input);
2293 }
2294 
2295 
2296 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2297   Register input = ToRegister(instr->unclamped());
2298   Register result = ToRegister32(instr->result());
2299   Register scratch = ToRegister(instr->temp1());
2300   Label done;
2301 
2302   // Both smi and heap number cases are handled.
2303   Label is_not_smi;
2304   __ JumpIfNotSmi(input, &is_not_smi);
2305   __ SmiUntag(result.X(), input);
2306   __ ClampInt32ToUint8(result);
2307   __ B(&done);
2308 
2309   __ Bind(&is_not_smi);
2310 
2311   // Check for heap number.
2312   Label is_heap_number;
2313   __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2314   __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
2315 
2316   // Check for undefined. Undefined is converted to zero for clamping conversion.
2317   DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
2318                          instr->environment());
2319   __ Mov(result, 0);
2320   __ B(&done);
2321 
2322   // Heap number case.
2323   __ Bind(&is_heap_number);
2324   DoubleRegister dbl_scratch = double_scratch();
2325   DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
2326   __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2327   __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2328 
2329   __ Bind(&done);
2330 }
2331 
2332 
2333 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2334   DoubleRegister value_reg = ToDoubleRegister(instr->value());
2335   Register result_reg = ToRegister(instr->result());
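  // Extract half of the double's raw bit pattern: for HIGH, move all 64 bits
  // into a core register and shift down by 32; otherwise read the low 32 bits
  // directly through the S register view.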
2336   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2337     __ Fmov(result_reg, value_reg);
2338     __ Lsr(result_reg, result_reg, 32);
2339   } else {
2340     __ Fmov(result_reg.W(), value_reg.S());
2341   }
2342 }
2343 
2344 
2345 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2346   Register hi_reg = ToRegister(instr->hi());
2347   Register lo_reg = ToRegister(instr->lo());
2348   DoubleRegister result_reg = ToDoubleRegister(instr->result());
2349 
2350   // Insert the least significant 32 bits of hi_reg into the most significant
2351   // 32 bits of lo_reg, and move to a floating point register.
2352   __ Bfi(lo_reg, hi_reg, 32, 32);
2353   __ Fmov(result_reg, lo_reg);
2354 }
2355 
2356 
2357 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2358   Handle<String> class_name = instr->hydrogen()->class_name();
2359   Label* true_label = instr->TrueLabel(chunk_);
2360   Label* false_label = instr->FalseLabel(chunk_);
2361   Register input = ToRegister(instr->value());
2362   Register scratch1 = ToRegister(instr->temp1());
2363   Register scratch2 = ToRegister(instr->temp2());
2364 
2365   __ JumpIfSmi(input, false_label);
2366 
2367   Register map = scratch2;
2368   if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
2369     // Assuming the following assertions, we can use the same compares to test
2370     // for both being a function type and being in the object type range.
2371     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2372     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2373                   FIRST_SPEC_OBJECT_TYPE + 1);
2374     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2375                   LAST_SPEC_OBJECT_TYPE - 1);
2376     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2377 
2378     // We expect CompareObjectType to load the object instance type in scratch1.
2379     __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2380     __ B(lt, false_label);
2381     __ B(eq, true_label);
2382     __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2383     __ B(eq, true_label);
2384   } else {
2385     __ IsObjectJSObjectType(input, map, scratch1, false_label);
2386   }
2387 
2388   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2389   // Check if the constructor in the map is a function.
2390   __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2391 
2392   // Objects with a non-function constructor have class 'Object'.
2393   if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
2394     __ JumpIfNotObjectType(
2395         scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2396   } else {
2397     __ JumpIfNotObjectType(
2398         scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2399   }
2400 
2401   // The constructor function is in scratch1. Get its instance class name.
2402   __ Ldr(scratch1,
2403          FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2404   __ Ldr(scratch1,
2405          FieldMemOperand(scratch1,
2406                          SharedFunctionInfo::kInstanceClassNameOffset));
2407 
2408   // The class name we are testing against is internalized since it's a literal.
2409   // The name in the constructor is internalized because of the way the context
2410   // is booted. This routine isn't expected to work for random API-created
2411   // classes and it doesn't have to because you can't access it with natives
2412   // syntax. Since both sides are internalized it is sufficient to use an
2413   // identity comparison.
2414   EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2415 }
2416 
2417 
2418 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2419   ASSERT(instr->hydrogen()->representation().IsDouble());
2420   FPRegister object = ToDoubleRegister(instr->object());
2421   Register temp = ToRegister(instr->temp());
2422 
2423   // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2424   // (relatively expensive) hole-NaN check.
2425   __ Fcmp(object, object);
2426   __ B(vc, instr->FalseLabel(chunk_));
2427 
2428   // We have a NaN, but is it the hole?
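  // Move the raw 64-bit pattern into a core register and compare it against
  // the hole NaN's bit pattern (kHoleNanInt64).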
2429   __ Fmov(temp, object);
2430   EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
2431 }
2432 
2433 
2434 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2435   ASSERT(instr->hydrogen()->representation().IsTagged());
2436   Register object = ToRegister(instr->object());
2437 
2438   EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2439 }
2440 
2441 
2442 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2443   Register value = ToRegister(instr->value());
2444   Register map = ToRegister(instr->temp());
2445 
2446   __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2447   EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2448 }
2449 
2450 
2451 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2452   Representation rep = instr->hydrogen()->value()->representation();
2453   ASSERT(!rep.IsInteger32());
2454   Register scratch = ToRegister(instr->temp());
2455 
2456   if (rep.IsDouble()) {
2457     __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2458                        instr->TrueLabel(chunk()));
2459   } else {
2460     Register value = ToRegister(instr->value());
2461     __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2462                 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2463     __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2464     __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
2465   }
2466   EmitGoto(instr->FalseDestination(chunk()));
2467 }
2468 
2469 
2470 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2471   LOperand* left = instr->left();
2472   LOperand* right = instr->right();
2473   bool is_unsigned =
2474       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2475       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2476   Condition cond = TokenToCondition(instr->op(), is_unsigned);
2477 
2478   if (left->IsConstantOperand() && right->IsConstantOperand()) {
2479     // We can statically evaluate the comparison.
2480     double left_val = ToDouble(LConstantOperand::cast(left));
2481     double right_val = ToDouble(LConstantOperand::cast(right));
2482     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2483         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2484     EmitGoto(next_block);
2485   } else {
2486     if (instr->is_double()) {
2487       if (right->IsConstantOperand()) {
2488         __ Fcmp(ToDoubleRegister(left),
2489                 ToDouble(LConstantOperand::cast(right)));
2490       } else if (left->IsConstantOperand()) {
2491         // Commute the operands and the condition.
2492         __ Fcmp(ToDoubleRegister(right),
2493                 ToDouble(LConstantOperand::cast(left)));
2494         cond = CommuteCondition(cond);
2495       } else {
2496         __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2497       }
2498 
2499       // If a NaN is involved, i.e. the result is unordered (V set),
2500       // jump to the false block label.
2501       __ B(vs, instr->FalseLabel(chunk_));
2502       EmitBranch(instr, cond);
2503     } else {
2504       if (instr->hydrogen_value()->representation().IsInteger32()) {
2505         if (right->IsConstantOperand()) {
2506           EmitCompareAndBranch(instr,
2507                                cond,
2508                                ToRegister32(left),
2509                                ToOperand32I(right));
2510         } else {
2511           // Commute the operands and the condition.
2512           EmitCompareAndBranch(instr,
2513                                CommuteCondition(cond),
2514                                ToRegister32(right),
2515                                ToOperand32I(left));
2516         }
2517       } else {
2518         ASSERT(instr->hydrogen_value()->representation().IsSmi());
2519         if (right->IsConstantOperand()) {
2520           int32_t value = ToInteger32(LConstantOperand::cast(right));
2521           EmitCompareAndBranch(instr,
2522                                cond,
2523                                ToRegister(left),
2524                                Operand(Smi::FromInt(value)));
2525         } else if (left->IsConstantOperand()) {
2526           // Commute the operands and the condition.
2527           int32_t value = ToInteger32(LConstantOperand::cast(left));
2528           EmitCompareAndBranch(instr,
2529                                CommuteCondition(cond),
2530                                ToRegister(right),
2531                                Operand(Smi::FromInt(value)));
2532         } else {
2533           EmitCompareAndBranch(instr,
2534                                cond,
2535                                ToRegister(left),
2536                                ToRegister(right));
2537         }
2538       }
2539     }
2540   }
2541 }
2542 
2543 
2544 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2545   Register left = ToRegister(instr->left());
2546   Register right = ToRegister(instr->right());
2547   EmitCompareAndBranch(instr, eq, left, right);
2548 }
2549 
2550 
2551 void LCodeGen::DoCmpT(LCmpT* instr) {
2552   ASSERT(ToRegister(instr->context()).is(cp));
2553   Token::Value op = instr->op();
2554   Condition cond = TokenToCondition(op, false);
2555 
2556   ASSERT(ToRegister(instr->left()).Is(x1));
2557   ASSERT(ToRegister(instr->right()).Is(x0));
2558   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2559   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2560   // Signal that we don't inline smi code before this stub.
2561   InlineSmiCheckInfo::EmitNotInlined(masm());
2562 
2563   // Return true or false depending on CompareIC result.
2564   // This instruction is marked as call. We can clobber any register.
2565   ASSERT(instr->IsMarkedAsCall());
2566   __ LoadTrueFalseRoots(x1, x2);
2567   __ Cmp(x0, 0);
2568   __ Csel(ToRegister(instr->result()), x1, x2, cond);
2569 }
2570 
2571 
2572 void LCodeGen::DoConstantD(LConstantD* instr) {
2573   ASSERT(instr->result()->IsDoubleRegister());
2574   DoubleRegister result = ToDoubleRegister(instr->result());
2575   if (instr->value() == 0) {
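    // 0.0 and -0.0 compare equal, so use copysign to inspect the sign bit and
    // materialize the correctly signed zero.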
2576     if (copysign(1.0, instr->value()) == 1.0) {
2577       __ Fmov(result, fp_zero);
2578     } else {
2579       __ Fneg(result, fp_zero);
2580     }
2581   } else {
2582     __ Fmov(result, instr->value());
2583   }
2584 }
2585 
2586 
2587 void LCodeGen::DoConstantE(LConstantE* instr) {
2588   __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2589 }
2590 
2591 
2592 void LCodeGen::DoConstantI(LConstantI* instr) {
2593   ASSERT(is_int32(instr->value()));
2594   // Cast the value here to ensure that the value isn't sign extended by the
2595   // implicit Operand constructor.
2596   __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2597 }
2598 
2599 
2600 void LCodeGen::DoConstantS(LConstantS* instr) {
2601   __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2602 }
2603 
2604 
2605 void LCodeGen::DoConstantT(LConstantT* instr) {
2606   Handle<Object> object = instr->value(isolate());
2607   AllowDeferredHandleDereference smi_check;
2608   __ LoadObject(ToRegister(instr->result()), object);
2609 }
2610 
2611 
2612 void LCodeGen::DoContext(LContext* instr) {
2613   // If there is a non-return use, the context must be moved to a register.
2614   Register result = ToRegister(instr->result());
2615   if (info()->IsOptimizing()) {
2616     __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2617   } else {
2618     // If there is no frame, the context must be in cp.
2619     ASSERT(result.is(cp));
2620   }
2621 }
2622 
2623 
2624 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2625   Register reg = ToRegister(instr->value());
2626   Handle<HeapObject> object = instr->hydrogen()->object().handle();
2627   AllowDeferredHandleDereference smi_check;
2628   if (isolate()->heap()->InNewSpace(*object)) {
2629     UseScratchRegisterScope temps(masm());
2630     Register temp = temps.AcquireX();
2631     Handle<Cell> cell = isolate()->factory()->NewCell(object);
2632     __ Mov(temp, Operand(Handle<Object>(cell)));
2633     __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2634     __ Cmp(reg, temp);
2635   } else {
2636     __ Cmp(reg, Operand(object));
2637   }
2638   DeoptimizeIf(ne, instr->environment());
2639 }
2640 
2641 
2642 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2643   last_lazy_deopt_pc_ = masm()->pc_offset();
2644   ASSERT(instr->HasEnvironment());
2645   LEnvironment* env = instr->environment();
2646   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2647   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2648 }
2649 
2650 
2651 void LCodeGen::DoDateField(LDateField* instr) {
2652   Register object = ToRegister(instr->date());
2653   Register result = ToRegister(instr->result());
2654   Register temp1 = x10;
2655   Register temp2 = x11;
2656   Smi* index = instr->index();
2657   Label runtime, done;
2658 
2659   ASSERT(object.is(result) && object.Is(x0));
2660   ASSERT(instr->IsMarkedAsCall());
2661 
2662   DeoptimizeIfSmi(object, instr->environment());
2663   __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2664   DeoptimizeIf(ne, instr->environment());
2665 
2666   if (index->value() == 0) {
2667     __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2668   } else {
2669     if (index->value() < JSDate::kFirstUncachedField) {
2670       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2671       __ Mov(temp1, Operand(stamp));
2672       __ Ldr(temp1, MemOperand(temp1));
2673       __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2674       __ Cmp(temp1, temp2);
2675       __ B(ne, &runtime);
2676       __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2677                                              kPointerSize * index->value()));
2678       __ B(&done);
2679     }
2680 
2681     __ Bind(&runtime);
2682     __ Mov(x1, Operand(index));
2683     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2684   }
2685 
2686   __ Bind(&done);
2687 }
2688 
2689 
2690 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2691   Deoptimizer::BailoutType type = instr->hydrogen()->type();
2692   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2693   // needed return address), even though the implementation of LAZY and EAGER is
2694   // now identical. When LAZY is eventually completely folded into EAGER, remove
2695   // the special case below.
2696   if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2697     type = Deoptimizer::LAZY;
2698   }
2699 
2700   Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
2701   Deoptimize(instr->environment(), &type);
2702 }
2703 
2704 
2705 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2706   Register dividend = ToRegister32(instr->dividend());
2707   int32_t divisor = instr->divisor();
2708   Register result = ToRegister32(instr->result());
2709   ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
2710   ASSERT(!result.is(dividend));
2711 
2712   // Check for (0 / -x) that will produce negative zero.
2713   HDiv* hdiv = instr->hydrogen();
2714   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2715     DeoptimizeIfZero(dividend, instr->environment());
2716   }
2717   // Check for (kMinInt / -1).
2718   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2719     // Test dividend for kMinInt by subtracting one (cmp) and checking for
2720     // overflow.
2721     __ Cmp(dividend, 1);
2722     DeoptimizeIf(vs, instr->environment());
2723   }
2724   // Deoptimize if remainder will not be 0.
2725   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2726       divisor != 1 && divisor != -1) {
2727     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2728     __ Tst(dividend, mask);
2729     DeoptimizeIf(ne, instr->environment());
2730   }
2731 
2732   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
2733     __ Neg(result, dividend);
2734     return;
2735   }
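  // An arithmetic right shift rounds towards negative infinity. To get the
  // required round-towards-zero result, a bias of (2^shift - 1), derived from
  // the dividend's sign bits, is added to negative dividends before shifting;
  // e.g. -6 / 4: (-6 + 3) >> 2 == -1.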
2736   int32_t shift = WhichPowerOf2Abs(divisor);
2737   if (shift == 0) {
2738     __ Mov(result, dividend);
2739   } else if (shift == 1) {
2740     __ Add(result, dividend, Operand(dividend, LSR, 31));
2741   } else {
2742     __ Mov(result, Operand(dividend, ASR, 31));
2743     __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2744   }
2745   if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2746   if (divisor < 0) __ Neg(result, result);
2747 }
2748 
2749 
2750 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2751   Register dividend = ToRegister32(instr->dividend());
2752   int32_t divisor = instr->divisor();
2753   Register result = ToRegister32(instr->result());
2754   ASSERT(!AreAliased(dividend, result));
2755 
2756   if (divisor == 0) {
2757     Deoptimize(instr->environment());
2758     return;
2759   }
2760 
2761   // Check for (0 / -x) that will produce negative zero.
2762   HDiv* hdiv = instr->hydrogen();
2763   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2764     DeoptimizeIfZero(dividend, instr->environment());
2765   }
2766 
2767   __ TruncatingDiv(result, dividend, Abs(divisor));
2768   if (divisor < 0) __ Neg(result, result);
2769 
2770   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2771     Register temp = ToRegister32(instr->temp());
2772     ASSERT(!AreAliased(dividend, result, temp));
2773     __ Sxtw(dividend.X(), dividend);
2774     __ Mov(temp, divisor);
2775     __ Smsubl(temp.X(), result, temp, dividend.X());
2776     DeoptimizeIfNotZero(temp, instr->environment());
2777   }
2778 }
2779 
2780 
2781 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
2782 void LCodeGen::DoDivI(LDivI* instr) {
2783   HBinaryOperation* hdiv = instr->hydrogen();
2784   Register dividend = ToRegister32(instr->dividend());
2785   Register divisor = ToRegister32(instr->divisor());
2786   Register result = ToRegister32(instr->result());
2787 
2788   // Issue the division first, and then check for any deopt cases whilst the
2789   // result is computed.
2790   __ Sdiv(result, dividend, divisor);
2791 
2792   if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2793     ASSERT_EQ(NULL, instr->temp());
2794     return;
2795   }
2796 
2797   // Check for x / 0.
2798   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2799     DeoptimizeIfZero(divisor, instr->environment());
2800   }
2801 
2802   // Check for (0 / -x) as that will produce negative zero.
2803   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2804     __ Cmp(divisor, 0);
2805 
2806     // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2807     // zero, i.e. a zero dividend with a negative divisor deopts.
2808     // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2809     // condition ne, so we don't deopt, i.e. a non-negative divisor won't deopt.
2810     __ Ccmp(dividend, 0, NoFlag, mi);
2811     DeoptimizeIf(eq, instr->environment());
2812   }
2813 
2814   // Check for (kMinInt / -1).
2815   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2816     // Test dividend for kMinInt by subtracting one (cmp) and checking for
2817     // overflow.
2818     __ Cmp(dividend, 1);
2819     // If overflow is set, i.e. dividend == kMinInt, compare the divisor with
2820     // -1. If overflow is clear, set the flags for condition ne, as the
2821     // dividend isn't kMinInt, and thus we shouldn't deopt.
2822     __ Ccmp(divisor, -1, NoFlag, vs);
2823     DeoptimizeIf(eq, instr->environment());
2824   }
2825 
2826   // Compute remainder and deopt if it's not zero.
2827   Register remainder = ToRegister32(instr->temp());
2828   __ Msub(remainder, result, divisor, dividend);
2829   DeoptimizeIfNotZero(remainder, instr->environment());
2830 }
2831 
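// Note on the Cmp/Ccmp pairs above (illustrative walk-through): Ccmp performs
// its comparison only when the given condition holds; otherwise it sets the
// flags to the supplied NZCV value (here NoFlag, which makes the final "eq"
// test fail). For the negative-zero check, with dividend == 0 and
// divisor == -1: Cmp(divisor, 0) sets mi, so Ccmp(dividend, 0, NoFlag, mi)
// compares 0 with 0, eq holds and the code deoptimizes. With divisor == 2,
// mi does not hold, the flags become NoFlag and no deopt is taken.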
2832 
2833 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2834   DoubleRegister input = ToDoubleRegister(instr->value());
2835   Register result = ToRegister32(instr->result());
2836 
2837   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2838     DeoptimizeIfMinusZero(input, instr->environment());
2839   }
2840 
2841   __ TryRepresentDoubleAsInt32(result, input, double_scratch());
2842   DeoptimizeIf(ne, instr->environment());
2843 
2844   if (instr->tag_result()) {
2845     __ SmiTag(result.X());
2846   }
2847 }
2848 
2849 
2850 void LCodeGen::DoDrop(LDrop* instr) {
2851   __ Drop(instr->count());
2852 }
2853 
2854 
2855 void LCodeGen::DoDummy(LDummy* instr) {
2856   // Nothing to see here, move on!
2857 }
2858 
2859 
2860 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2861   // Nothing to see here, move on!
2862 }
2863 
2864 
2865 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2866   ASSERT(ToRegister(instr->context()).is(cp));
2867   // FunctionLiteral is marked as a call, so we can trash any register.
2868   ASSERT(instr->IsMarkedAsCall());
2869 
2870   // Use the fast case closure allocation code that allocates in new
2871   // space for nested functions that don't need literals cloning.
2872   bool pretenure = instr->hydrogen()->pretenure();
2873   if (!pretenure && instr->hydrogen()->has_no_literals()) {
2874     FastNewClosureStub stub(isolate(),
2875                             instr->hydrogen()->strict_mode(),
2876                             instr->hydrogen()->is_generator());
2877     __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2878     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2879   } else {
2880     __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2881     __ Mov(x1, Operand(pretenure ? factory()->true_value()
2882                                  : factory()->false_value()));
2883     __ Push(cp, x2, x1);
2884     CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
2885   }
2886 }
2887 
2888 
2889 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2890   Register map = ToRegister(instr->map());
2891   Register result = ToRegister(instr->result());
2892   Label load_cache, done;
2893 
2894   __ EnumLengthUntagged(result, map);
2895   __ Cbnz(result, &load_cache);
2896 
2897   __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2898   __ B(&done);
2899 
2900   __ Bind(&load_cache);
2901   __ LoadInstanceDescriptors(map, result);
2902   __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2903   __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2904   DeoptimizeIfZero(result, instr->environment());
2905 
2906   __ Bind(&done);
2907 }
2908 
2909 
2910 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2911   Register object = ToRegister(instr->object());
2912   Register null_value = x5;
2913 
2914   ASSERT(instr->IsMarkedAsCall());
2915   ASSERT(object.Is(x0));
2916 
2917   DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
2918                    instr->environment());
2919 
2920   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2921   __ Cmp(object, null_value);
2922   DeoptimizeIf(eq, instr->environment());
2923 
2924   DeoptimizeIfSmi(object, instr->environment());
2925 
2926   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2927   __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2928   DeoptimizeIf(le, instr->environment());
2929 
2930   Label use_cache, call_runtime;
2931   __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2932 
2933   __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2934   __ B(&use_cache);
2935 
2936   // Get the set of properties to enumerate.
2937   __ Bind(&call_runtime);
2938   __ Push(object);
2939   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2940 
2941   __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2942   DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
2943 
2944   __ Bind(&use_cache);
2945 }
2946 
2947 
2948 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2949   Register input = ToRegister(instr->value());
2950   Register result = ToRegister(instr->result());
2951 
2952   __ AssertString(input);
2953 
2954   // Assert that we can use a W register load to get the hash.
2955   ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
2956   __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2957   __ IndexFromHash(result, result);
2958 }
2959 
2960 
2961 void LCodeGen::EmitGoto(int block) {
2962   // Do not emit jump if we are emitting a goto to the next block.
2963   if (!IsNextEmittedBlock(block)) {
2964     __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2965   }
2966 }
2967 
2968 
2969 void LCodeGen::DoGoto(LGoto* instr) {
2970   EmitGoto(instr->block_id());
2971 }
2972 
2973 
2974 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2975     LHasCachedArrayIndexAndBranch* instr) {
2976   Register input = ToRegister(instr->value());
2977   Register temp = ToRegister32(instr->temp());
2978 
2979   // Assert that the cache status bits fit in a W register.
2980   ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
2981   __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2982   __ Tst(temp, String::kContainsCachedArrayIndexMask);
2983   EmitBranch(instr, eq);
2984 }
2985 
2986 
2987 // The HHasInstanceTypeAndBranch instruction is built with an interval of types
2988 // to test, but is only used in very restricted ways. The only possible kinds
2989 // of intervals are:
2990 //  - [ FIRST_TYPE, instr->to() ]
2991 //  - [ instr->from(), LAST_TYPE ]
2992 //  - instr->from() == instr->to()
2993 //
2994 // These kinds of intervals can be checked with only one compare instruction,
2995 // provided the correct value and test condition are used.
2996 //
2997 // TestType() will return the value to use in the compare instruction and
2998 // BranchCondition() will return the condition to use depending on the kind
2999 // of interval actually specified in the instruction.
3000 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
3001   InstanceType from = instr->from();
3002   InstanceType to = instr->to();
3003   if (from == FIRST_TYPE) return to;
3004   ASSERT((from == to) || (to == LAST_TYPE));
3005   return from;
3006 }
3007 
3008 
3009 // See comment above TestType function for what this function does.
3010 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
3011   InstanceType from = instr->from();
3012   InstanceType to = instr->to();
3013   if (from == to) return eq;
3014   if (to == LAST_TYPE) return hs;
3015   if (from == FIRST_TYPE) return ls;
3016   UNREACHABLE();
3017   return eq;
3018 }
3019 
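// Example (illustrative only): for the interval [FIRST_TYPE, JS_FUNCTION_TYPE],
// TestType() returns JS_FUNCTION_TYPE and BranchCondition() returns ls, so a
// single CompareObjectType against JS_FUNCTION_TYPE followed by a branch on
// "ls" (unsigned lower or same) covers the whole interval. A degenerate
// interval such as [MAP_TYPE, MAP_TYPE] compares against MAP_TYPE and
// branches on eq.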
3020 
3021 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
3022   Register input = ToRegister(instr->value());
3023   Register scratch = ToRegister(instr->temp());
3024 
3025   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3026     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3027   }
3028   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
3029   EmitBranch(instr, BranchCondition(instr->hydrogen()));
3030 }
3031 
3032 
3033 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3034   Register result = ToRegister(instr->result());
3035   Register base = ToRegister(instr->base_object());
3036   if (instr->offset()->IsConstantOperand()) {
3037     __ Add(result, base, ToOperand32I(instr->offset()));
3038   } else {
3039     __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
3040   }
3041 }
3042 
3043 
3044 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
3045   ASSERT(ToRegister(instr->context()).is(cp));
3046   // Assert that the arguments are in the registers expected by InstanceofStub.
3047   ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
3048   ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
3049 
3050   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
3051   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3052 
3053   // InstanceofStub returns a result in x0:
3054   //   0     => not an instance
3055   //   smi 1 => instance.
3056   __ Cmp(x0, 0);
3057   __ LoadTrueFalseRoots(x0, x1);
3058   __ Csel(x0, x0, x1, eq);
3059 }
3060 
3061 
3062 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3063   class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3064    public:
3065     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3066                                   LInstanceOfKnownGlobal* instr)
3067         : LDeferredCode(codegen), instr_(instr) { }
3068     virtual void Generate() {
3069       codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3070     }
3071     virtual LInstruction* instr() { return instr_; }
3072    private:
3073     LInstanceOfKnownGlobal* instr_;
3074   };
3075 
3076   DeferredInstanceOfKnownGlobal* deferred =
3077       new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3078 
3079   Label map_check, return_false, cache_miss, done;
3080   Register object = ToRegister(instr->value());
3081   Register result = ToRegister(instr->result());
3082   // x4 is expected in the associated deferred code and stub.
3083   Register map_check_site = x4;
3084   Register map = x5;
3085 
3086   // This instruction is marked as call. We can clobber any register.
3087   ASSERT(instr->IsMarkedAsCall());
3088 
3089   // We must take into account that object is in x11.
3090   ASSERT(object.Is(x11));
3091   Register scratch = x10;
3092 
3093   // A Smi is not instance of anything.
3094   __ JumpIfSmi(object, &return_false);
3095 
3096   // This is the inlined call site instanceof cache. The two occurrences of the
3097   // hole value will be patched to the last map/result pair generated by the
3098   // instanceof stub.
3099   __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3100   {
3101     // We deliberately use Factory::the_hole_value() here, instead of loading
3102     // from the root array, to force relocation so that the value can later be
3103     // patched with a custom one.
3104     InstructionAccurateScope scope(masm(), 5);
3105     __ bind(&map_check);
3106     // Will be patched with the cached map.
3107     Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3108     __ ldr(scratch, Immediate(Handle<Object>(cell)));
3109     __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3110     __ cmp(map, scratch);
3111     __ b(&cache_miss, ne);
3112     // The address of this instruction is computed relative to the map check
3113     // above, so check the size of the code generated.
3114     ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
3115     // Will be patched with the cached result.
3116     __ ldr(result, Immediate(factory()->the_hole_value()));
3117   }
3118   __ B(&done);
3119 
3120   // The inlined call site cache did not match.
3121   // Check null and string before calling the deferred code.
3122   __ Bind(&cache_miss);
3123   // Compute the address of the map check. It must not be clobbered until the
3124   // InstanceOfStub has used it.
3125   __ Adr(map_check_site, &map_check);
3126   // Null is not instance of anything.
3127   __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3128 
3129   // String values are not instances of anything.
3130   // Return false if the object is a string. Otherwise, jump to the deferred
3131   // code.
3132   // Note that we can't jump directly to deferred code from
3133   // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3134   // code can be out of range.
3135   __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3136   __ B(deferred->entry());
3137 
3138   __ Bind(&return_false);
3139   __ LoadRoot(result, Heap::kFalseValueRootIndex);
3140 
3141   // Here result is either true or false.
3142   __ Bind(deferred->exit());
3143   __ Bind(&done);
3144 }
3145 
3146 
3147 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3148   Register result = ToRegister(instr->result());
3149   ASSERT(result.Is(x0));  // InstanceofStub returns its result in x0.
3150   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3151   flags = static_cast<InstanceofStub::Flags>(
3152       flags | InstanceofStub::kArgsInRegisters);
3153   flags = static_cast<InstanceofStub::Flags>(
3154       flags | InstanceofStub::kReturnTrueFalseObject);
3155   flags = static_cast<InstanceofStub::Flags>(
3156       flags | InstanceofStub::kCallSiteInlineCheck);
3157 
3158   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3159   LoadContextFromDeferred(instr->context());
3160 
3161   // Prepare InstanceofStub arguments.
3162   ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
3163   __ LoadObject(InstanceofStub::right(), instr->function());
3164 
3165   InstanceofStub stub(isolate(), flags);
3166   CallCodeGeneric(stub.GetCode(),
3167                   RelocInfo::CODE_TARGET,
3168                   instr,
3169                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3170   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3171   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3172 
3173   // Put the result value into the result register slot.
3174   __ StoreToSafepointRegisterSlot(result, result);
3175 }
3176 
3177 
3178 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3179   DoGap(instr);
3180 }
3181 
3182 
3183 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3184   Register value = ToRegister32(instr->value());
3185   DoubleRegister result = ToDoubleRegister(instr->result());
3186   __ Scvtf(result, value);
3187 }
3188 
3189 
3190 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3191   ASSERT(ToRegister(instr->context()).is(cp));
3192   // The function is required to be in x1.
3193   ASSERT(ToRegister(instr->function()).is(x1));
3194   ASSERT(instr->HasPointerMap());
3195 
3196   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3197   if (known_function.is_null()) {
3198     LPointerMap* pointers = instr->pointer_map();
3199     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3200     ParameterCount count(instr->arity());
3201     __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3202   } else {
3203     CallKnownFunction(known_function,
3204                       instr->hydrogen()->formal_parameter_count(),
3205                       instr->arity(),
3206                       instr,
3207                       x1);
3208   }
3209   after_push_argument_ = false;
3210 }
3211 
3212 
3213 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3214   Register temp1 = ToRegister(instr->temp1());
3215   Register temp2 = ToRegister(instr->temp2());
3216 
3217   // Get the frame pointer for the calling frame.
3218   __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3219 
3220   // Skip the arguments adaptor frame if it exists.
3221   Label check_frame_marker;
3222   __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3223   __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3224   __ B(ne, &check_frame_marker);
3225   __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3226 
3227   // Check the marker in the calling frame.
3228   __ Bind(&check_frame_marker);
3229   __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3230 
3231   EmitCompareAndBranch(
3232       instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3233 }
3234 
3235 
3236 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3237   Label* is_object = instr->TrueLabel(chunk_);
3238   Label* is_not_object = instr->FalseLabel(chunk_);
3239   Register value = ToRegister(instr->value());
3240   Register map = ToRegister(instr->temp1());
3241   Register scratch = ToRegister(instr->temp2());
3242 
3243   __ JumpIfSmi(value, is_not_object);
3244   __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3245 
3246   __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3247 
3248   // Check for undetectable objects.
3249   __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3250   __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3251 
3252   // Check that instance type is in object type range.
3253   __ IsInstanceJSObjectType(map, scratch, NULL);
3254   // Flags have been updated by IsInstanceJSObjectType. We can now test the
3255   // flags for "le" condition to check if the object's type is a valid
3256   // JS object type.
3257   EmitBranch(instr, le);
3258 }
3259 
3260 
3261 Condition LCodeGen::EmitIsString(Register input,
3262                                  Register temp1,
3263                                  Label* is_not_string,
3264                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
3265   if (check_needed == INLINE_SMI_CHECK) {
3266     __ JumpIfSmi(input, is_not_string);
3267   }
3268   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3269 
3270   return lt;
3271 }
3272 
3273 
3274 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3275   Register val = ToRegister(instr->value());
3276   Register scratch = ToRegister(instr->temp());
3277 
3278   SmiCheck check_needed =
3279       instr->hydrogen()->value()->type().IsHeapObject()
3280           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3281   Condition true_cond =
3282       EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3283 
3284   EmitBranch(instr, true_cond);
3285 }
3286 
3287 
3288 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3289   Register value = ToRegister(instr->value());
3290   STATIC_ASSERT(kSmiTag == 0);
3291   EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3292 }
3293 
3294 
3295 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3296   Register input = ToRegister(instr->value());
3297   Register temp = ToRegister(instr->temp());
3298 
3299   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3300     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3301   }
3302   __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3303   __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3304 
3305   EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3306 }
3307 
3308 
3309 static const char* LabelType(LLabel* label) {
3310   if (label->is_loop_header()) return " (loop header)";
3311   if (label->is_osr_entry()) return " (OSR entry)";
3312   return "";
3313 }
3314 
3315 
3316 void LCodeGen::DoLabel(LLabel* label) {
3317   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3318           current_instruction_,
3319           label->hydrogen_value()->id(),
3320           label->block_id(),
3321           LabelType(label));
3322 
3323   __ Bind(label->label());
3324   current_block_ = label->block_id();
3325   DoGap(label);
3326 }
3327 
3328 
3329 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3330   Register context = ToRegister(instr->context());
3331   Register result = ToRegister(instr->result());
3332   __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3333   if (instr->hydrogen()->RequiresHoleCheck()) {
3334     if (instr->hydrogen()->DeoptimizesOnHole()) {
3335       DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3336                        instr->environment());
3337     } else {
3338       Label not_the_hole;
3339       __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3340       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3341       __ Bind(&not_the_hole);
3342     }
3343   }
3344 }
3345 
3346 
3347 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3348   Register function = ToRegister(instr->function());
3349   Register result = ToRegister(instr->result());
3350   Register temp = ToRegister(instr->temp());
3351 
3352   // Check that the function really is a function. Leaves map in the result
3353   // register.
3354   __ CompareObjectType(function, result, temp, JS_FUNCTION_TYPE);
3355   DeoptimizeIf(ne, instr->environment());
3356 
3357   // Make sure that the function has an instance prototype.
3358   Label non_instance;
3359   __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
3360   __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
3361 
3362   // Get the prototype or initial map from the function.
3363   __ Ldr(result, FieldMemOperand(function,
3364                                  JSFunction::kPrototypeOrInitialMapOffset));
3365 
3366   // Check that the function has a prototype or an initial map.
3367   DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3368                    instr->environment());
3369 
3370   // If the function does not have an initial map, we're done.
3371   Label done;
3372   __ CompareObjectType(result, temp, temp, MAP_TYPE);
3373   __ B(ne, &done);
3374 
3375   // Get the prototype from the initial map.
3376   __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3377   __ B(&done);
3378 
3379   // Non-instance prototype: fetch prototype from constructor field in initial
3380   // map.
3381   __ Bind(&non_instance);
3382   __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3383 
3384   // All done.
3385   __ Bind(&done);
3386 }
3387 
3388 
3389 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3390   Register result = ToRegister(instr->result());
3391   __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3392   __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3393   if (instr->hydrogen()->RequiresHoleCheck()) {
3394     DeoptimizeIfRoot(
3395         result, Heap::kTheHoleValueRootIndex, instr->environment());
3396   }
3397 }
3398 
3399 
3400 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3401   ASSERT(ToRegister(instr->context()).is(cp));
3402   ASSERT(ToRegister(instr->global_object()).Is(x0));
3403   ASSERT(ToRegister(instr->result()).Is(x0));
3404   __ Mov(x2, Operand(instr->name()));
3405   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3406   Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3407   CallCode(ic, RelocInfo::CODE_TARGET, instr);
3408 }
3409 
3410 
3411 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3412     Register key,
3413     Register base,
3414     Register scratch,
3415     bool key_is_smi,
3416     bool key_is_constant,
3417     int constant_key,
3418     ElementsKind elements_kind,
3419     int base_offset) {
3420   int element_size_shift = ElementsKindToShiftSize(elements_kind);
3421 
3422   if (key_is_constant) {
3423     int key_offset = constant_key << element_size_shift;
3424     return MemOperand(base, key_offset + base_offset);
3425   }
3426 
3427   if (key_is_smi) {
3428     __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3429     return MemOperand(scratch, base_offset);
3430   }
3431 
3432   if (base_offset == 0) {
3433     return MemOperand(base, key, SXTW, element_size_shift);
3434   }
3435 
3436   ASSERT(!AreAliased(scratch, key));
3437   __ Add(scratch, base, base_offset);
3438   return MemOperand(scratch, key, SXTW, element_size_shift);
3439 }
3440 
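// Worked example (illustrative only): for EXTERNAL_FLOAT64_ELEMENTS the
// element size shift is 3. With a constant key of 5 and a base_offset of 0,
// the function above returns MemOperand(base, 5 << 3), i.e. an offset of 40
// bytes. With an untagged register key and base_offset == 0 it instead
// returns MemOperand(base, key, SXTW, 3), letting the addressing mode
// sign-extend and scale the key.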
3441 
3442 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3443   Register ext_ptr = ToRegister(instr->elements());
3444   Register scratch;
3445   ElementsKind elements_kind = instr->elements_kind();
3446 
3447   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3448   bool key_is_constant = instr->key()->IsConstantOperand();
3449   Register key = no_reg;
3450   int constant_key = 0;
3451   if (key_is_constant) {
3452     ASSERT(instr->temp() == NULL);
3453     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3454     if (constant_key & 0xf0000000) {
3455       Abort(kArrayIndexConstantValueTooBig);
3456     }
3457   } else {
3458     scratch = ToRegister(instr->temp());
3459     key = ToRegister(instr->key());
3460   }
3461 
3462   MemOperand mem_op =
3463       PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3464                                        key_is_constant, constant_key,
3465                                        elements_kind,
3466                                        instr->base_offset());
3467 
3468   if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3469       (elements_kind == FLOAT32_ELEMENTS)) {
3470     DoubleRegister result = ToDoubleRegister(instr->result());
3471     __ Ldr(result.S(), mem_op);
3472     __ Fcvt(result, result.S());
3473   } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3474              (elements_kind == FLOAT64_ELEMENTS)) {
3475     DoubleRegister result = ToDoubleRegister(instr->result());
3476     __ Ldr(result, mem_op);
3477   } else {
3478     Register result = ToRegister(instr->result());
3479 
3480     switch (elements_kind) {
3481       case EXTERNAL_INT8_ELEMENTS:
3482       case INT8_ELEMENTS:
3483         __ Ldrsb(result, mem_op);
3484         break;
3485       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3486       case EXTERNAL_UINT8_ELEMENTS:
3487       case UINT8_ELEMENTS:
3488       case UINT8_CLAMPED_ELEMENTS:
3489         __ Ldrb(result, mem_op);
3490         break;
3491       case EXTERNAL_INT16_ELEMENTS:
3492       case INT16_ELEMENTS:
3493         __ Ldrsh(result, mem_op);
3494         break;
3495       case EXTERNAL_UINT16_ELEMENTS:
3496       case UINT16_ELEMENTS:
3497         __ Ldrh(result, mem_op);
3498         break;
3499       case EXTERNAL_INT32_ELEMENTS:
3500       case INT32_ELEMENTS:
3501         __ Ldrsw(result, mem_op);
3502         break;
3503       case EXTERNAL_UINT32_ELEMENTS:
3504       case UINT32_ELEMENTS:
3505         __ Ldr(result.W(), mem_op);
3506         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3507           // Deopt if value > 0x80000000.
3508           __ Tst(result, 0xFFFFFFFF80000000);
3509           DeoptimizeIf(ne, instr->environment());
3510         }
3511         break;
3512       case FLOAT32_ELEMENTS:
3513       case FLOAT64_ELEMENTS:
3514       case EXTERNAL_FLOAT32_ELEMENTS:
3515       case EXTERNAL_FLOAT64_ELEMENTS:
3516       case FAST_HOLEY_DOUBLE_ELEMENTS:
3517       case FAST_HOLEY_ELEMENTS:
3518       case FAST_HOLEY_SMI_ELEMENTS:
3519       case FAST_DOUBLE_ELEMENTS:
3520       case FAST_ELEMENTS:
3521       case FAST_SMI_ELEMENTS:
3522       case DICTIONARY_ELEMENTS:
3523       case SLOPPY_ARGUMENTS_ELEMENTS:
3524         UNREACHABLE();
3525         break;
3526     }
3527   }
3528 }
3529 
3530 
3531 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
3532                                               Register elements,
3533                                               Register key,
3534                                               bool key_is_tagged,
3535                                               ElementsKind elements_kind,
3536                                               Representation representation,
3537                                               int base_offset) {
3538   STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3539   int element_size_shift = ElementsKindToShiftSize(elements_kind);
3540 
3541   // Even though the HLoad/StoreKeyed instructions force the input
3542   // representation for the key to be an integer, the input gets replaced during
3543   // bounds check elimination with the index argument to the bounds check, which
3544   // can be tagged, so that case must be handled here, too.
3545   if (key_is_tagged) {
3546     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3547     if (representation.IsInteger32()) {
3548       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
3549       // Read or write only the most-significant 32 bits in the case of fast smi
3550       // arrays.
3551       return UntagSmiMemOperand(base, base_offset);
3552     } else {
3553       return MemOperand(base, base_offset);
3554     }
3555   } else {
3556     // Sign extend key because it could be a 32-bit negative value or contain
3557     // garbage in the top 32-bits. The address computation happens in 64-bit.
3558     ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
3559     if (representation.IsInteger32()) {
3560       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
3561       // Read or write only the most-significant 32 bits in the case of fast smi
3562       // arrays.
3563       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3564       return UntagSmiMemOperand(base, base_offset);
3565     } else {
3566       __ Add(base, elements, base_offset);
3567       return MemOperand(base, key, SXTW, element_size_shift);
3568     }
3569   }
3570 }
3571 
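// Worked example (illustrative only): loading an Integer32 value from a
// FAST_SMI_ELEMENTS backing store with an untagged key computes
//   base = elements + (key << element_size_shift)
// and returns UntagSmiMemOperand(base, base_offset), an operand that reads
// only the upper 32 bits of the 64-bit smi slot. Because a smi keeps its
// payload in those upper bits (kSmiShift == 32), that single 32-bit load
// yields the untagged integer with no separate SmiUntag step.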
3572 
3573 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3574   Register elements = ToRegister(instr->elements());
3575   DoubleRegister result = ToDoubleRegister(instr->result());
3576   MemOperand mem_op;
3577 
3578   if (instr->key()->IsConstantOperand()) {
3579     ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
3580            (instr->temp() == NULL));
3581 
3582     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3583     if (constant_key & 0xf0000000) {
3584       Abort(kArrayIndexConstantValueTooBig);
3585     }
3586     int offset = instr->base_offset() + constant_key * kDoubleSize;
3587     mem_op = MemOperand(elements, offset);
3588   } else {
3589     Register load_base = ToRegister(instr->temp());
3590     Register key = ToRegister(instr->key());
3591     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3592     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3593                                       instr->hydrogen()->elements_kind(),
3594                                       instr->hydrogen()->representation(),
3595                                       instr->base_offset());
3596   }
3597 
3598   __ Ldr(result, mem_op);
3599 
3600   if (instr->hydrogen()->RequiresHoleCheck()) {
3601     Register scratch = ToRegister(instr->temp());
3602     // Detect the hole NaN by adding one to the integer representation of the
3603     // result, and checking for overflow.
3604     STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
3605     __ Ldr(scratch, mem_op);
3606     __ Cmn(scratch, 1);
3607     DeoptimizeIf(vs, instr->environment());
3608   }
3609 }
3610 
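// Note on the hole check above (illustrative only): the hole is the NaN with
// bit pattern 0x7fffffffffffffff (kHoleNanInt64). Cmn adds 1 to that pattern,
// which overflows a signed 64-bit value and sets the V flag, so the
// DeoptimizeIf(vs, ...) fires exactly for the hole. Any other double,
// including ordinary NaNs such as 0x7ff8000000000000, leaves V clear.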
3611 
3612 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3613   Register elements = ToRegister(instr->elements());
3614   Register result = ToRegister(instr->result());
3615   MemOperand mem_op;
3616 
3617   Representation representation = instr->hydrogen()->representation();
3618   if (instr->key()->IsConstantOperand()) {
3619     ASSERT(instr->temp() == NULL);
3620     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3621     int offset = instr->base_offset() +
3622         ToInteger32(const_operand) * kPointerSize;
3623     if (representation.IsInteger32()) {
3624       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
3625       STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
3626                     (kSmiTag == 0));
3627       mem_op = UntagSmiMemOperand(elements, offset);
3628     } else {
3629       mem_op = MemOperand(elements, offset);
3630     }
3631   } else {
3632     Register load_base = ToRegister(instr->temp());
3633     Register key = ToRegister(instr->key());
3634     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3635 
3636     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3637                                       instr->hydrogen()->elements_kind(),
3638                                       representation, instr->base_offset());
3639   }
3640 
3641   __ Load(result, mem_op, representation);
3642 
3643   if (instr->hydrogen()->RequiresHoleCheck()) {
3644     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3645       DeoptimizeIfNotSmi(result, instr->environment());
3646     } else {
3647       DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3648                        instr->environment());
3649     }
3650   }
3651 }
3652 
3653 
3654 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3655   ASSERT(ToRegister(instr->context()).is(cp));
3656   ASSERT(ToRegister(instr->object()).Is(x1));
3657   ASSERT(ToRegister(instr->key()).Is(x0));
3658 
3659   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3660   CallCode(ic, RelocInfo::CODE_TARGET, instr);
3661 
3662   ASSERT(ToRegister(instr->result()).Is(x0));
3663 }
3664 
3665 
3666 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3667   HObjectAccess access = instr->hydrogen()->access();
3668   int offset = access.offset();
3669   Register object = ToRegister(instr->object());
3670 
3671   if (access.IsExternalMemory()) {
3672     Register result = ToRegister(instr->result());
3673     __ Load(result, MemOperand(object, offset), access.representation());
3674     return;
3675   }
3676 
3677   if (instr->hydrogen()->representation().IsDouble()) {
3678     FPRegister result = ToDoubleRegister(instr->result());
3679     __ Ldr(result, FieldMemOperand(object, offset));
3680     return;
3681   }
3682 
3683   Register result = ToRegister(instr->result());
3684   Register source;
3685   if (access.IsInobject()) {
3686     source = object;
3687   } else {
3688     // Load the properties array, using result as a scratch register.
3689     __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3690     source = result;
3691   }
3692 
3693   if (access.representation().IsSmi() &&
3694       instr->hydrogen()->representation().IsInteger32()) {
3695     // Read int value directly from upper half of the smi.
3696     STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3697     __ Load(result, UntagSmiFieldMemOperand(source, offset),
3698             Representation::Integer32());
3699   } else {
3700     __ Load(result, FieldMemOperand(source, offset), access.representation());
3701   }
3702 }
3703 
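// Worked example for the smi fast path above (illustrative only): a field
// holding the smi 42 contains the 64-bit value 42 << 32. Reading it through
// UntagSmiFieldMemOperand performs a 32-bit load of the upper word, which is
// already the untagged value 42, so no separate SmiUntag (shift) instruction
// is required.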
3704 
3705 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3706   ASSERT(ToRegister(instr->context()).is(cp));
3707   // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
3708   ASSERT(ToRegister(instr->object()).is(x0));
3709   __ Mov(x2, Operand(instr->name()));
3710 
3711   Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3712   CallCode(ic, RelocInfo::CODE_TARGET, instr);
3713 
3714   ASSERT(ToRegister(instr->result()).is(x0));
3715 }
3716 
3717 
3718 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3719   Register result = ToRegister(instr->result());
3720   __ LoadRoot(result, instr->index());
3721 }
3722 
3723 
3724 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3725   Register result = ToRegister(instr->result());
3726   Register map = ToRegister(instr->value());
3727   __ EnumLengthSmi(result, map);
3728 }
3729 
3730 
3731 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3732   Representation r = instr->hydrogen()->value()->representation();
3733   if (r.IsDouble()) {
3734     DoubleRegister input = ToDoubleRegister(instr->value());
3735     DoubleRegister result = ToDoubleRegister(instr->result());
3736     __ Fabs(result, input);
3737   } else if (r.IsSmi() || r.IsInteger32()) {
3738     Register input = r.IsSmi() ? ToRegister(instr->value())
3739                                : ToRegister32(instr->value());
3740     Register result = r.IsSmi() ? ToRegister(instr->result())
3741                                 : ToRegister32(instr->result());
3742     __ Abs(result, input);
3743     DeoptimizeIf(vs, instr->environment());
3744   }
3745 }
3746 
3747 
3748 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3749                                        Label* exit,
3750                                        Label* allocation_entry) {
3751   // Handle the tricky cases of MathAbsTagged:
3752   //  - HeapNumber inputs.
3753   //    - Negative inputs produce a positive result, so a new HeapNumber is
3754   //      allocated to hold it.
3755   //    - Positive inputs are returned as-is, since there is no need to allocate
3756   //      a new HeapNumber for the result.
3757   //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
3758   //    in a smi. In this case, the inline code sets the result and jumps
3759   //    directly to the allocation_entry label.
3760   ASSERT(instr->context() != NULL);
3761   ASSERT(ToRegister(instr->context()).is(cp));
3762   Register input = ToRegister(instr->value());
3763   Register temp1 = ToRegister(instr->temp1());
3764   Register temp2 = ToRegister(instr->temp2());
3765   Register result_bits = ToRegister(instr->temp3());
3766   Register result = ToRegister(instr->result());
3767 
3768   Label runtime_allocation;
3769 
3770   // Deoptimize if the input is not a HeapNumber.
3771   __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
3772   DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3773                       instr->environment());
3774 
3775   // If the argument is positive, we can return it as-is, without any need to
3776   // allocate a new HeapNumber for the result. We have to do this in integer
3777   // registers (rather than with fabs) because we need to be able to distinguish
3778   // the two zeroes.
3779   __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3780   __ Mov(result, input);
3781   __ Tbz(result_bits, kXSignBit, exit);
3782 
3783   // Calculate abs(input) by clearing the sign bit.
3784   __ Bic(result_bits, result_bits, kXSignMask);
3785 
3786   // Allocate a new HeapNumber to hold the result.
3787   //  result_bits   The bit representation of the (double) result.
3788   __ Bind(allocation_entry);
3789   __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3790   // The inline (non-deferred) code will store result_bits into result.
3791   __ B(exit);
3792 
3793   __ Bind(&runtime_allocation);
3794   if (FLAG_debug_code) {
3795     // Because result is in the pointer map, we need to make sure it has a valid
3796     // tagged value before we call the runtime. We speculatively set it to the
3797     // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3798     // be valid.
3799     Label result_ok;
3800     Register input = ToRegister(instr->value());
3801     __ JumpIfSmi(result, &result_ok);
3802     __ Cmp(input, result);
3803     __ Assert(eq, kUnexpectedValue);
3804     __ Bind(&result_ok);
3805   }
3806 
3807   { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3808     CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3809                             instr->context());
3810     __ StoreToSafepointRegisterSlot(x0, result);
3811   }
3812   // The inline (non-deferred) code will store result_bits into result.
3813 }
3814 
3815 
3816 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3817   // Class for deferred case.
3818   class DeferredMathAbsTagged: public LDeferredCode {
3819    public:
3820     DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3821         : LDeferredCode(codegen), instr_(instr) { }
3822     virtual void Generate() {
3823       codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3824                                          allocation_entry());
3825     }
3826     virtual LInstruction* instr() { return instr_; }
3827     Label* allocation_entry() { return &allocation; }
3828    private:
3829     LMathAbsTagged* instr_;
3830     Label allocation;
3831   };
3832 
3833   // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3834   // in GenerateDeferredCode. Tidy this up.
3835   ASSERT(!NeedsDeferredFrame());
3836 
3837   DeferredMathAbsTagged* deferred =
3838       new(zone()) DeferredMathAbsTagged(this, instr);
3839 
3840   ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
3841          instr->hydrogen()->value()->representation().IsSmi());
3842   Register input = ToRegister(instr->value());
3843   Register result_bits = ToRegister(instr->temp3());
3844   Register result = ToRegister(instr->result());
3845   Label done;
3846 
3847   // Handle smis inline.
3848   // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3849   // never get set by the negation. This is therefore the same as the Integer32
3850   // case in DoMathAbs, except that it operates on 64-bit values.
3851   STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3852 
3853   __ JumpIfNotSmi(input, deferred->entry());
3854 
3855   __ Abs(result, input, NULL, &done);
3856 
3857   // The result is the magnitude (abs) of the smallest value a smi can
3858   // represent, encoded as a double.
3859   __ Mov(result_bits, double_to_rawbits(0x80000000));
3860   __ B(deferred->allocation_entry());
3861 
3862   __ Bind(deferred->exit());
3863   __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3864 
3865   __ Bind(&done);
3866 }
3867 
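// Worked example for the smi edge case above (illustrative only): the smi
// -0x80000000 keeps its payload in the upper 32 bits, so its 64-bit
// representation is 0x8000000000000000, the most negative int64 value. The
// 64-bit Abs therefore overflows and does not branch to "done"; instead
// result_bits is loaded with the bit pattern of the double 2147483648.0 and
// control jumps to the deferred allocation entry, which boxes that value in a
// freshly allocated HeapNumber.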
3868 
3869 void LCodeGen::DoMathExp(LMathExp* instr) {
3870   DoubleRegister input = ToDoubleRegister(instr->value());
3871   DoubleRegister result = ToDoubleRegister(instr->result());
3872   DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3873   DoubleRegister double_temp2 = double_scratch();
3874   Register temp1 = ToRegister(instr->temp1());
3875   Register temp2 = ToRegister(instr->temp2());
3876   Register temp3 = ToRegister(instr->temp3());
3877 
3878   MathExpGenerator::EmitMathExp(masm(), input, result,
3879                                 double_temp1, double_temp2,
3880                                 temp1, temp2, temp3);
3881 }
3882 
3883 
3884 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3885   DoubleRegister input = ToDoubleRegister(instr->value());
3886   DoubleRegister result = ToDoubleRegister(instr->result());
3887 
3888   __ Frintm(result, input);
3889 }
3890 
3891 
3892 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3893   DoubleRegister input = ToDoubleRegister(instr->value());
3894   Register result = ToRegister(instr->result());
3895 
3896   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3897     DeoptimizeIfMinusZero(input, instr->environment());
3898   }
3899 
3900   __ Fcvtms(result, input);
3901 
3902   // Check that the result fits into a 32-bit integer.
3903   //  - The result did not overflow.
3904   __ Cmp(result, Operand(result, SXTW));
3905   //  - The input was not NaN.
3906   __ Fccmp(input, input, NoFlag, eq);
3907   DeoptimizeIf(ne, instr->environment());
3908 }
3909 
3910 
3911 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3912   Register dividend = ToRegister32(instr->dividend());
3913   Register result = ToRegister32(instr->result());
3914   int32_t divisor = instr->divisor();
3915 
3916   // If the divisor is 1, return the dividend.
3917   if (divisor == 1) {
3918     __ Mov(result, dividend, kDiscardForSameWReg);
3919     return;
3920   }
3921 
3922   // If the divisor is positive, things are easy: There can be no deopts and we
3923   // can simply do an arithmetic right shift.
3924   int32_t shift = WhichPowerOf2Abs(divisor);
3925   if (divisor > 1) {
3926     __ Mov(result, Operand(dividend, ASR, shift));
3927     return;
3928   }
3929 
3930   // If the divisor is negative, we have to negate and handle edge cases.
3931   __ Negs(result, dividend);
3932   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3933     DeoptimizeIf(eq, instr->environment());
3934   }
3935 
3936   // Dividing by -1 is basically negation, unless we overflow.
3937   if (divisor == -1) {
3938     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3939       DeoptimizeIf(vs, instr->environment());
3940     }
3941     return;
3942   }
3943 
3944   // If the negation could not overflow, simply shifting is OK.
3945   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3946     __ Mov(result, Operand(dividend, ASR, shift));
3947     return;
3948   }
3949 
3950   __ Asr(result, result, shift);
3951   __ Csel(result, result, kMinInt / divisor, vc);
3952 }
3953 
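// The negative-divisor path above exploits the fact that an arithmetic right
// shift already floors. A minimal plain-C++ sketch (a hypothetical helper,
// not referenced anywhere in this file; it assumes divisor == -(1 << shift)
// with 1 <= shift <= 30 and an arithmetic right shift):
static inline int32_t FlooringDivByNegPowerOf2Sketch(int32_t dividend,
                                                     int shift) {
  // Negating kMinInt would overflow; the exact answer is +2^(31 - shift).
  if (dividend == kMinInt) return 1 << (31 - shift);
  // E.g. 7 / -4: (-7) >> 2 == -2 == floor(-1.75).
  return (-dividend) >> shift;
}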
3954 
3955 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3956   Register dividend = ToRegister32(instr->dividend());
3957   int32_t divisor = instr->divisor();
3958   Register result = ToRegister32(instr->result());
3959   ASSERT(!AreAliased(dividend, result));
3960 
3961   if (divisor == 0) {
3962     Deoptimize(instr->environment());
3963     return;
3964   }
3965 
3966   // Check for (0 / -x) that will produce negative zero.
3967   HMathFloorOfDiv* hdiv = instr->hydrogen();
3968   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3969     DeoptimizeIfZero(dividend, instr->environment());
3970   }
3971 
3972   // Easy case: We need no dynamic check for the dividend and the flooring
3973   // division is the same as the truncating division.
3974   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3975       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3976     __ TruncatingDiv(result, dividend, Abs(divisor));
3977     if (divisor < 0) __ Neg(result, result);
3978     return;
3979   }
3980 
3981   // In the general case we may need to adjust before and after the truncating
3982   // division to get a flooring division.
3983   Register temp = ToRegister32(instr->temp());
3984   ASSERT(!AreAliased(temp, dividend, result));
3985   Label needs_adjustment, done;
3986   __ Cmp(dividend, 0);
3987   __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3988   __ TruncatingDiv(result, dividend, Abs(divisor));
3989   if (divisor < 0) __ Neg(result, result);
3990   __ B(&done);
3991   __ Bind(&needs_adjustment);
3992   __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3993   __ TruncatingDiv(result, temp, Abs(divisor));
3994   if (divisor < 0) __ Neg(result, result);
3995   __ Sub(result, result, Operand(1));
3996   __ Bind(&done);
3997 }
3998 
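// Worked example for the adjustment above (illustrative only): dividend == -7,
// divisor == 3. The operands' signs differ, so the needs_adjustment path runs:
//   temp = -7 + 1 = -6;  TruncatingDiv(-6, 3) = -2;  result = -2 - 1 = -3,
// which is floor(-7 / 3). The "+1 before, -1 after" adjustment is what keeps
// exact quotients correct: for -6 / 3 it computes TruncatingDiv(-5, 3) = -1
// and then -1 - 1 = -2, whereas naively subtracting 1 from the truncated
// quotient would give -3.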
3999 
4000 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
4001 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
4002   Register dividend = ToRegister32(instr->dividend());
4003   Register divisor = ToRegister32(instr->divisor());
4004   Register remainder = ToRegister32(instr->temp());
4005   Register result = ToRegister32(instr->result());
4006 
4007   // This can't cause an exception on ARM, so we can speculatively
4008   // execute it now.
4009   __ Sdiv(result, dividend, divisor);
4010 
4011   // Check for x / 0.
4012   DeoptimizeIfZero(divisor, instr->environment());
4013 
4014   // Check for (kMinInt / -1).
4015   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
4016     // The V flag will be set iff dividend == kMinInt.
4017     __ Cmp(dividend, 1);
4018     __ Ccmp(divisor, -1, NoFlag, vs);
4019     DeoptimizeIf(eq, instr->environment());
4020   }
4021 
4022   // Check for (0 / -x) that will produce negative zero.
4023   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4024     __ Cmp(divisor, 0);
4025     __ Ccmp(dividend, 0, ZFlag, mi);
4026     // "divisor" can't be zero because the code would have already been
4027     // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4028     // In this case we need to deoptimize to produce a -0.
4029     DeoptimizeIf(eq, instr->environment());
4030   }
4031 
4032   Label done;
4033   // If both operands have the same sign then we are done.
4034   __ Eor(remainder, dividend, divisor);
4035   __ Tbz(remainder, kWSignBit, &done);
4036 
4037   // Check if the result needs to be corrected.
4038   __ Msub(remainder, result, divisor, dividend);
4039   __ Cbz(remainder, &done);
4040   __ Sub(result, result, 1);
4041 
4042   __ Bind(&done);
4043 }
4044 
4045 
4046 void LCodeGen::DoMathLog(LMathLog* instr) {
4047   ASSERT(instr->IsMarkedAsCall());
4048   ASSERT(ToDoubleRegister(instr->value()).is(d0));
4049   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
4050                    0, 1);
4051   ASSERT(ToDoubleRegister(instr->result()).Is(d0));
4052 }
4053 
4054 
4055 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4056   Register input = ToRegister32(instr->value());
4057   Register result = ToRegister32(instr->result());
4058   __ Clz(result, input);
4059 }
4060 
4061 
4062 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4063   DoubleRegister input = ToDoubleRegister(instr->value());
4064   DoubleRegister result = ToDoubleRegister(instr->result());
4065   Label done;
4066 
4067   // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
4068   //  Math.pow(-Infinity, 0.5) == +Infinity
4069   //  Math.pow(-0.0, 0.5) == +0.0
4070 
4071   // Catch -infinity inputs first.
4072   // TODO(jbramley): A constant infinity register would be helpful here.
4073   __ Fmov(double_scratch(), kFP64NegativeInfinity);
4074   __ Fcmp(double_scratch(), input);
4075   __ Fabs(result, input);
4076   __ B(&done, eq);
4077 
4078   // Add +0.0 to convert -0.0 to +0.0.
4079   __ Fadd(double_scratch(), input, fp_zero);
4080   __ Fsqrt(result, double_scratch());
4081 
4082   __ Bind(&done);
4083 }
4084 
4085 
4086 void LCodeGen::DoPower(LPower* instr) {
4087   Representation exponent_type = instr->hydrogen()->right()->representation();
4088   // Having marked this as a call, we can use any registers.
4089   // Just make sure that the input/output registers are the expected ones.
4090   ASSERT(!instr->right()->IsDoubleRegister() ||
4091          ToDoubleRegister(instr->right()).is(d1));
4092   ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4093          ToRegister(instr->right()).is(x11));
4094   ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
4095   ASSERT(ToDoubleRegister(instr->left()).is(d0));
4096   ASSERT(ToDoubleRegister(instr->result()).is(d0));
4097 
4098   if (exponent_type.IsSmi()) {
4099     MathPowStub stub(isolate(), MathPowStub::TAGGED);
4100     __ CallStub(&stub);
4101   } else if (exponent_type.IsTagged()) {
4102     Label no_deopt;
4103     __ JumpIfSmi(x11, &no_deopt);
4104     __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
4105     DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
4106                         instr->environment());
4107     __ Bind(&no_deopt);
4108     MathPowStub stub(isolate(), MathPowStub::TAGGED);
4109     __ CallStub(&stub);
4110   } else if (exponent_type.IsInteger32()) {
4111     // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
4112     // supports large integer exponents.
4113     Register exponent = ToRegister(instr->right());
4114     __ Sxtw(exponent, exponent);
4115     MathPowStub stub(isolate(), MathPowStub::INTEGER);
4116     __ CallStub(&stub);
4117   } else {
4118     ASSERT(exponent_type.IsDouble());
4119     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4120     __ CallStub(&stub);
4121   }
4122 }
4123 
4124 
4125 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
4126   DoubleRegister input = ToDoubleRegister(instr->value());
4127   DoubleRegister result = ToDoubleRegister(instr->result());
4128   DoubleRegister scratch_d = double_scratch();
4129 
4130   ASSERT(!AreAliased(input, result, scratch_d));
4131 
4132   Label done;
4133 
4134   __ Frinta(result, input);
4135   __ Fcmp(input, 0.0);
4136   __ Fccmp(result, input, ZFlag, lt);
4137   // The result is correct if the input was in [-0, +infinity], or was a
4138   // negative integral value.
4139   __ B(eq, &done);
4140 
4141   // Here the input is negative, non-integral, with an exponent lower than 52.
4142   // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
4143   // case. So we can safely add 0.5.
4144   __ Fmov(scratch_d, 0.5);
4145   __ Fadd(result, input, scratch_d);
4146   __ Frintm(result, result);
4147   // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
4148   __ Fabs(result, result);
4149   __ Fneg(result, result);
4150 
4151   __ Bind(&done);
4152 }
4153 
4154 
4155 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
4156   DoubleRegister input = ToDoubleRegister(instr->value());
4157   DoubleRegister temp = ToDoubleRegister(instr->temp1());
4158   DoubleRegister dot_five = double_scratch();
4159   Register result = ToRegister(instr->result());
4160   Label done;
4161 
4162   // Math.round() rounds to the nearest integer, with ties going towards
4163   // +infinity. This does not match any IEEE-754 rounding mode.
4164   //  - Infinities and NaNs are propagated unchanged, but cause deopts because
4165   //    they can't be represented as integers.
4166   //  - The sign of the result is the same as the sign of the input. This means
4167   //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
4168   //    result of -0.0.
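  // Illustrative examples (not part of the original source), assuming standard
  // JS semantics: Math.round(2.5) == 3, Math.round(-2.5) == -2 and
  // Math.round(-0.2) == -0.0, while NaN or values outside the int32 range
  // cannot be represented and therefore deoptimize.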
4169 
4170   // Add 0.5 and round towards -infinity.
4171   __ Fmov(dot_five, 0.5);
4172   __ Fadd(temp, input, dot_five);
4173   __ Fcvtms(result, temp);
4174 
4175   // The result is correct if:
4176   //  result is not 0, as the input could be NaN or [-0.5, -0.0].
4177   //  result is not 1, as 0.499...94 will wrongly map to 1.
4178   //  result fits in 32 bits.
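  // Illustrative note (not part of the original source): the Cmp below sets
  // eq only if the X register equals its own sign-extended W part, i.e. the
  // result fits in 32 bits. The Ccmp then compares the result with 1 when
  // that held, or forces Z (with C clear) when it did not, so the unsigned
  // "hi" branch is taken exactly when the result fits and is neither 0 nor 1
  // (negative results appear as large unsigned values and also branch).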
4179   __ Cmp(result, Operand(result.W(), SXTW));
4180   __ Ccmp(result, 1, ZFlag, eq);
4181   __ B(hi, &done);
4182 
4183   // At this point, we have to handle possible inputs of NaN or numbers in the
4184   // range [-0.5, 1.5[, or numbers larger than 32 bits.
4185 
4186   // Deoptimize if the result is greater than 1, as the input must then be larger than 32 bits.
4187   __ Cmp(result, 1);
4188   DeoptimizeIf(hi, instr->environment());
4189 
4190   // Deoptimize for negative inputs, which at this point are only numbers in
4191   // the range [-0.5, -0.0].
4192   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4193     __ Fmov(result, input);
4194     DeoptimizeIfNegative(result, instr->environment());
4195   }
4196 
4197   // Deoptimize if the input was NaN.
4198   __ Fcmp(input, dot_five);
4199   DeoptimizeIf(vs, instr->environment());
4200 
4201   // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
4202   // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
4203   // else 0; we avoid dealing with 0.499...94 directly.
4204   __ Cset(result, ge);
4205   __ Bind(&done);
4206 }
4207 
4208 
4209 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4210   DoubleRegister input = ToDoubleRegister(instr->value());
4211   DoubleRegister result = ToDoubleRegister(instr->result());
4212   __ Fsqrt(result, input);
4213 }
4214 
4215 
4216 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
4217   HMathMinMax::Operation op = instr->hydrogen()->operation();
4218   if (instr->hydrogen()->representation().IsInteger32()) {
4219     Register result = ToRegister32(instr->result());
4220     Register left = ToRegister32(instr->left());
4221     Operand right = ToOperand32I(instr->right());
4222 
4223     __ Cmp(left, right);
4224     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4225   } else if (instr->hydrogen()->representation().IsSmi()) {
4226     Register result = ToRegister(instr->result());
4227     Register left = ToRegister(instr->left());
4228     Operand right = ToOperand(instr->right());
4229 
4230     __ Cmp(left, right);
4231     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4232   } else {
4233     ASSERT(instr->hydrogen()->representation().IsDouble());
4234     DoubleRegister result = ToDoubleRegister(instr->result());
4235     DoubleRegister left = ToDoubleRegister(instr->left());
4236     DoubleRegister right = ToDoubleRegister(instr->right());
4237 
4238     if (op == HMathMinMax::kMathMax) {
4239       __ Fmax(result, left, right);
4240     } else {
4241       ASSERT(op == HMathMinMax::kMathMin);
4242       __ Fmin(result, left, right);
4243     }
4244   }
4245 }
4246 
4247 
4248 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4249   Register dividend = ToRegister32(instr->dividend());
4250   int32_t divisor = instr->divisor();
4251   ASSERT(dividend.is(ToRegister32(instr->result())));
4252 
4253   // Theoretically, a variation of the branch-free code for integer division by
4254   // a power of 2 (calculating the remainder via an additional multiplication
4255   // (which gets simplified to an 'and') and subtraction) should be faster, and
4256   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
4257   // indicate that positive dividends are heavily favored, so the branching
4258   // version performs better.
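  // Illustrative note (not part of the original source): the mask computed
  // below is Abs(divisor) - 1, written so it cannot overflow when divisor is
  // kMinInt (since -(kMinInt + 1) == kMaxInt). With divisor == 8 the mask is
  // 7, and a dividend of -13 takes the negative path: negate to 13,
  // 13 & 7 == 5, negate again to -5, matching JS -13 % 8 == -5 (the result
  // keeps the sign of the dividend).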
4259   HMod* hmod = instr->hydrogen();
4260   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4261   Label dividend_is_not_negative, done;
4262   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4263     __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
4264     // Note that this is correct even for kMinInt operands.
4265     __ Neg(dividend, dividend);
4266     __ And(dividend, dividend, mask);
4267     __ Negs(dividend, dividend);
4268     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4269       DeoptimizeIf(eq, instr->environment());
4270     }
4271     __ B(&done);
4272   }
4273 
4274   __ bind(&dividend_is_not_negative);
4275   __ And(dividend, dividend, mask);
4276   __ bind(&done);
4277 }
4278 
4279 
4280 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4281   Register dividend = ToRegister32(instr->dividend());
4282   int32_t divisor = instr->divisor();
4283   Register result = ToRegister32(instr->result());
4284   Register temp = ToRegister32(instr->temp());
4285   ASSERT(!AreAliased(dividend, result, temp));
4286 
4287   if (divisor == 0) {
4288     Deoptimize(instr->environment());
4289     return;
4290   }
4291 
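  // Illustrative note (not part of the original source): the Smsubl below
  // computes dividend - quotient * Abs(divisor), i.e. the remainder of the
  // truncating division. For dividend == -13 and divisor == 5 the quotient is
  // -2, giving -13 - (-10) == -3, which matches JS -13 % 5 == -3.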
4292   __ TruncatingDiv(result, dividend, Abs(divisor));
4293   __ Sxtw(dividend.X(), dividend);
4294   __ Mov(temp, Abs(divisor));
4295   __ Smsubl(result.X(), result, temp, dividend.X());
4296 
4297   // Check for negative zero.
4298   HMod* hmod = instr->hydrogen();
4299   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4300     Label remainder_not_zero;
4301     __ Cbnz(result, &remainder_not_zero);
4302     DeoptimizeIfNegative(dividend, instr->environment());
4303     __ bind(&remainder_not_zero);
4304   }
4305 }
4306 
4307 
4308 void LCodeGen::DoModI(LModI* instr) {
4309   Register dividend = ToRegister32(instr->left());
4310   Register divisor = ToRegister32(instr->right());
4311   Register result = ToRegister32(instr->result());
4312 
4313   Label done;
4314   // modulo = dividend - quotient * divisor
4315   __ Sdiv(result, dividend, divisor);
4316   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4317     DeoptimizeIfZero(divisor, instr->environment());
4318   }
4319   __ Msub(result, result, divisor, dividend);
4320   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4321     __ Cbnz(result, &done);
4322     DeoptimizeIfNegative(dividend, instr->environment());
4323   }
4324   __ Bind(&done);
4325 }
4326 
4327 
4328 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4329   ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
4330   bool is_smi = instr->hydrogen()->representation().IsSmi();
4331   Register result =
4332       is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4333   Register left =
4334       is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
4335   int32_t right = ToInteger32(instr->right());
4336   ASSERT((right > -kMaxInt) && (right < kMaxInt));
4337 
4338   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4339   bool bailout_on_minus_zero =
4340     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4341 
4342   if (bailout_on_minus_zero) {
4343     if (right < 0) {
4344       // The result is -0 if right is negative and left is zero.
4345       DeoptimizeIfZero(left, instr->environment());
4346     } else if (right == 0) {
4347       // The result is -0 if the right is zero and the left is negative.
4348       DeoptimizeIfNegative(left, instr->environment());
4349     }
4350   }
4351 
4352   switch (right) {
4353     // Cases which can detect overflow.
4354     case -1:
4355       if (can_overflow) {
4356         // Only 0x80000000 can overflow here.
4357         __ Negs(result, left);
4358         DeoptimizeIf(vs, instr->environment());
4359       } else {
4360         __ Neg(result, left);
4361       }
4362       break;
4363     case 0:
4364       // This case can never overflow.
4365       __ Mov(result, 0);
4366       break;
4367     case 1:
4368       // This case can never overflow.
4369       __ Mov(result, left, kDiscardForSameWReg);
4370       break;
4371     case 2:
4372       if (can_overflow) {
4373         __ Adds(result, left, left);
4374         DeoptimizeIf(vs, instr->environment());
4375       } else {
4376         __ Add(result, left, left);
4377       }
4378       break;
4379 
4380     default:
4381       // Multiplication by constant powers of two (and some related values)
4382       // can be done efficiently with shifted operands.
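      // Illustrative examples (not part of the original source):
      //   x * 8   is emitted as   x << 3,
      //   x * 5   as              x + (x << 2),
      //   x * 7   as             -(x - (x << 3)),
      //   x * -3  as              x - (x << 2).
      // Only constants of these shapes reach this default case.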
4383       int32_t right_abs = Abs(right);
4384 
4385       if (IsPowerOf2(right_abs)) {
4386         int right_log2 = WhichPowerOf2(right_abs);
4387 
4388         if (can_overflow) {
4389           Register scratch = result;
4390           ASSERT(!AreAliased(scratch, left));
4391           __ Cls(scratch, left);
4392           __ Cmp(scratch, right_log2);
4393           DeoptimizeIf(lt, instr->environment());
4394         }
4395 
4396         if (right >= 0) {
4397           // result = left << log2(right)
4398           __ Lsl(result, left, right_log2);
4399         } else {
4400           // result = -left << log2(-right)
4401           if (can_overflow) {
4402             __ Negs(result, Operand(left, LSL, right_log2));
4403             DeoptimizeIf(vs, instr->environment());
4404           } else {
4405             __ Neg(result, Operand(left, LSL, right_log2));
4406           }
4407         }
4408         return;
4409       }
4410 
4411 
4412       // For the following cases, we could perform a conservative overflow check
4413       // with CLS as above. However, the few cycles saved are likely not worth
4414       // the risk of deoptimizing more often than required.
4415       ASSERT(!can_overflow);
4416 
4417       if (right >= 0) {
4418         if (IsPowerOf2(right - 1)) {
4419           // result = left + left << log2(right - 1)
4420           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4421         } else if (IsPowerOf2(right + 1)) {
4422           // result = -left + left << log2(right + 1)
4423           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4424           __ Neg(result, result);
4425         } else {
4426           UNREACHABLE();
4427         }
4428       } else {
4429         if (IsPowerOf2(-right + 1)) {
4430           // result = left - left << log2(-right + 1)
4431           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
4432         } else if (IsPowerOf2(-right - 1)) {
4433           // result = -left - left << log2(-right - 1)
4434           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
4435           __ Neg(result, result);
4436         } else {
4437           UNREACHABLE();
4438         }
4439       }
4440   }
4441 }
4442 
4443 
4444 void LCodeGen::DoMulI(LMulI* instr) {
4445   Register result = ToRegister32(instr->result());
4446   Register left = ToRegister32(instr->left());
4447   Register right = ToRegister32(instr->right());
4448 
4449   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4450   bool bailout_on_minus_zero =
4451     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4452 
4453   if (bailout_on_minus_zero && !left.Is(right)) {
4454     // If one operand is zero and the other is negative, the result is -0.
4455     //  - Set Z (eq) if either left or right, or both, are 0.
4456     __ Cmp(left, 0);
4457     __ Ccmp(right, 0, ZFlag, ne);
4458     //  - If so (eq), set N (mi) if left + right is negative.
4459     //  - Otherwise, clear N.
4460     __ Ccmn(left, right, NoFlag, eq);
4461     DeoptimizeIf(mi, instr->environment());
4462   }
4463 
4464   if (can_overflow) {
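    // Note added for clarity: Smull forms the full 64-bit product, and the
    // comparison against the sign extension of its low 32 bits detects any
    // 32-bit overflow, which triggers the deopt below.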
4465     __ Smull(result.X(), left, right);
4466     __ Cmp(result.X(), Operand(result, SXTW));
4467     DeoptimizeIf(ne, instr->environment());
4468   } else {
4469     __ Mul(result, left, right);
4470   }
4471 }
4472 
4473 
4474 void LCodeGen::DoMulS(LMulS* instr) {
4475   Register result = ToRegister(instr->result());
4476   Register left = ToRegister(instr->left());
4477   Register right = ToRegister(instr->right());
4478 
4479   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4480   bool bailout_on_minus_zero =
4481     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4482 
4483   if (bailout_on_minus_zero && !left.Is(right)) {
4484     // If one operand is zero and the other is negative, the result is -0.
4485     //  - Set Z (eq) if either left or right, or both, are 0.
4486     __ Cmp(left, 0);
4487     __ Ccmp(right, 0, ZFlag, ne);
4488     //  - If so (eq), set N (mi) if left + right is negative.
4489     //  - Otherwise, clear N.
4490     __ Ccmn(left, right, NoFlag, eq);
4491     DeoptimizeIf(mi, instr->environment());
4492   }
4493 
4494   STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4495   if (can_overflow) {
4496     __ Smulh(result, left, right);
4497     __ Cmp(result, Operand(result.W(), SXTW));
4498     __ SmiTag(result);
4499     DeoptimizeIf(ne, instr->environment());
4500   } else {
4501     if (AreAliased(result, left, right)) {
4502       // All three registers are the same: half untag the input and then
4503       // multiply, giving a tagged result.
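      // Note added for clarity: the operand holds a tagged smi, i.e.
      // value << 32. Shifting right by kSmiShift / 2 leaves value << 16, and
      // squaring that gives (value * value) << 32, already a tagged smi.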
4504       STATIC_ASSERT((kSmiShift % 2) == 0);
4505       __ Asr(result, left, kSmiShift / 2);
4506       __ Mul(result, result, result);
4507     } else if (result.Is(left) && !left.Is(right)) {
4508       // Registers result and left alias, right is distinct: untag left into
4509       // result, and then multiply by right, giving a tagged result.
4510       __ SmiUntag(result, left);
4511       __ Mul(result, result, right);
4512     } else {
4513       ASSERT(!left.Is(result));
4514       // Registers result and right alias, left is distinct, or all registers
4515       // are distinct: untag right into result, and then multiply by left,
4516       // giving a tagged result.
4517       __ SmiUntag(result, right);
4518       __ Mul(result, left, result);
4519     }
4520   }
4521 }
4522 
4523 
4524 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4525   // TODO(3095996): Get rid of this. For now, we need to make the
4526   // result register contain a valid pointer because it is already
4527   // contained in the register pointer map.
4528   Register result = ToRegister(instr->result());
4529   __ Mov(result, 0);
4530 
4531   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4532   // NumberTagU and NumberTagD use the context from the frame, rather than
4533   // the environment's HContext or HInlinedContext value.
4534   // They only call Runtime::kHiddenAllocateHeapNumber.
4535   // The corresponding HChange instructions are added in a phase that does
4536   // not have easy access to the local context.
4537   __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4538   __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4539   RecordSafepointWithRegisters(
4540       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4541   __ StoreToSafepointRegisterSlot(x0, result);
4542 }
4543 
4544 
4545 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4546   class DeferredNumberTagD: public LDeferredCode {
4547    public:
4548     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4549         : LDeferredCode(codegen), instr_(instr) { }
4550     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4551     virtual LInstruction* instr() { return instr_; }
4552    private:
4553     LNumberTagD* instr_;
4554   };
4555 
4556   DoubleRegister input = ToDoubleRegister(instr->value());
4557   Register result = ToRegister(instr->result());
4558   Register temp1 = ToRegister(instr->temp1());
4559   Register temp2 = ToRegister(instr->temp2());
4560 
4561   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4562   if (FLAG_inline_new) {
4563     __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4564   } else {
4565     __ B(deferred->entry());
4566   }
4567 
4568   __ Bind(deferred->exit());
4569   __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4570 }
4571 
4572 
4573 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
4574                                     LOperand* value,
4575                                     LOperand* temp1,
4576                                     LOperand* temp2) {
4577   Label slow, convert_and_store;
4578   Register src = ToRegister32(value);
4579   Register dst = ToRegister(instr->result());
4580   Register scratch1 = ToRegister(temp1);
4581 
4582   if (FLAG_inline_new) {
4583     Register scratch2 = ToRegister(temp2);
4584     __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4585     __ B(&convert_and_store);
4586   }
4587 
4588   // Slow case: call the runtime system to do the number allocation.
4589   __ Bind(&slow);
4590   // TODO(3095996): Put a valid pointer value in the stack slot where the result
4591   // register is stored, as this register is in the pointer map, but contains an
4592   // integer value.
4593   __ Mov(dst, 0);
4594   {
4595     // Preserve the value of all registers.
4596     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4597 
4598     // NumberTagU and NumberTagD use the context from the frame, rather than
4599     // the environment's HContext or HInlinedContext value.
4600     // They only call Runtime::kHiddenAllocateHeapNumber.
4601     // The corresponding HChange instructions are added in a phase that does
4602     // not have easy access to the local context.
4603     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4604     __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4605     RecordSafepointWithRegisters(
4606       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4607     __ StoreToSafepointRegisterSlot(x0, dst);
4608   }
4609 
4610   // Convert number to floating point and store in the newly allocated heap
4611   // number.
4612   __ Bind(&convert_and_store);
4613   DoubleRegister dbl_scratch = double_scratch();
4614   __ Ucvtf(dbl_scratch, src);
4615   __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4616 }
4617 
4618 
4619 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4620   class DeferredNumberTagU: public LDeferredCode {
4621    public:
4622     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4623         : LDeferredCode(codegen), instr_(instr) { }
4624     virtual void Generate() {
4625       codegen()->DoDeferredNumberTagU(instr_,
4626                                       instr_->value(),
4627                                       instr_->temp1(),
4628                                       instr_->temp2());
4629     }
4630     virtual LInstruction* instr() { return instr_; }
4631    private:
4632     LNumberTagU* instr_;
4633   };
4634 
4635   Register value = ToRegister32(instr->value());
4636   Register result = ToRegister(instr->result());
4637 
4638   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4639   __ Cmp(value, Smi::kMaxValue);
4640   __ B(hi, deferred->entry());
4641   __ SmiTag(result, value.X());
4642   __ Bind(deferred->exit());
4643 }
4644 
4645 
4646 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4647   Register input = ToRegister(instr->value());
4648   Register scratch = ToRegister(instr->temp());
4649   DoubleRegister result = ToDoubleRegister(instr->result());
4650   bool can_convert_undefined_to_nan =
4651       instr->hydrogen()->can_convert_undefined_to_nan();
4652 
4653   Label done, load_smi;
4654 
4655   // Work out what untag mode we're working with.
4656   HValue* value = instr->hydrogen()->value();
4657   NumberUntagDMode mode = value->representation().IsSmi()
4658       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4659 
4660   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4661     __ JumpIfSmi(input, &load_smi);
4662 
4663     Label convert_undefined;
4664 
4665     // Heap number map check.
4666     __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4667     if (can_convert_undefined_to_nan) {
4668       __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4669                        &convert_undefined);
4670     } else {
4671       DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4672                           instr->environment());
4673     }
4674 
4675     // Load heap number.
4676     __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4677     if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4678       DeoptimizeIfMinusZero(result, instr->environment());
4679     }
4680     __ B(&done);
4681 
4682     if (can_convert_undefined_to_nan) {
4683       __ Bind(&convert_undefined);
4684       DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
4685                           instr->environment());
4686 
4687       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4688       __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4689       __ B(&done);
4690     }
4691 
4692   } else {
4693     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4694     // Fall through to load_smi.
4695   }
4696 
4697   // Smi to double register conversion.
4698   __ Bind(&load_smi);
4699   __ SmiUntagToDouble(result, input);
4700 
4701   __ Bind(&done);
4702 }
4703 
4704 
4705 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4706   // This is a pseudo-instruction that ensures that the environment here is
4707   // properly registered for deoptimization and records the assembler's PC
4708   // offset.
4709   LEnvironment* environment = instr->environment();
4710 
4711   // If the environment were already registered, we would have no way of
4712   // backpatching it with the spill slot operands.
4713   ASSERT(!environment->HasBeenRegistered());
4714   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4715 
4716   GenerateOsrPrologue();
4717 }
4718 
4719 
4720 void LCodeGen::DoParameter(LParameter* instr) {
4721   // Nothing to do.
4722 }
4723 
4724 
4725 void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
4726   __ PushPreamble(instr->argc(), kPointerSize);
4727 }
4728 
4729 
4730 void LCodeGen::DoPushArguments(LPushArguments* instr) {
4731   MacroAssembler::PushPopQueue args(masm());
4732 
4733   for (int i = 0; i < instr->ArgumentCount(); ++i) {
4734     LOperand* arg = instr->argument(i);
4735     if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
4736       Abort(kDoPushArgumentNotImplementedForDoubleType);
4737       return;
4738     }
4739     args.Queue(ToRegister(arg));
4740   }
4741 
4742   // The preamble was done by LPreparePushArguments.
4743   args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
4744 
4745   after_push_argument_ = true;
4746 }
4747 
4748 
4749 void LCodeGen::DoReturn(LReturn* instr) {
4750   if (FLAG_trace && info()->IsOptimizing()) {
4751     // Push the return value on the stack as the parameter.
4752     // Runtime::TraceExit returns its parameter in x0.  We're leaving the code
4753     // managed by the register allocator and tearing down the frame, so it's
4754     // safe to write to the context register.
4755     __ Push(x0);
4756     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4757     __ CallRuntime(Runtime::kTraceExit, 1);
4758   }
4759 
4760   if (info()->saves_caller_doubles()) {
4761     RestoreCallerDoubles();
4762   }
4763 
4764   int no_frame_start = -1;
4765   if (NeedsEagerFrame()) {
4766     Register stack_pointer = masm()->StackPointer();
4767     __ Mov(stack_pointer, fp);
4768     no_frame_start = masm_->pc_offset();
4769     __ Pop(fp, lr);
4770   }
4771 
4772   if (instr->has_constant_parameter_count()) {
4773     int parameter_count = ToInteger32(instr->constant_parameter_count());
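    // Note added for clarity: the "+ 1" drops the receiver along with the
    // parameters.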
4774     __ Drop(parameter_count + 1);
4775   } else {
4776     Register parameter_count = ToRegister(instr->parameter_count());
4777     __ DropBySMI(parameter_count);
4778   }
4779   __ Ret();
4780 
4781   if (no_frame_start != -1) {
4782     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4783   }
4784 }
4785 
4786 
4787 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4788                                            Register temp,
4789                                            LOperand* index,
4790                                            String::Encoding encoding) {
4791   if (index->IsConstantOperand()) {
4792     int offset = ToInteger32(LConstantOperand::cast(index));
4793     if (encoding == String::TWO_BYTE_ENCODING) {
4794       offset *= kUC16Size;
4795     }
4796     STATIC_ASSERT(kCharSize == 1);
4797     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
4798   }
4799 
4800   __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
4801   if (encoding == String::ONE_BYTE_ENCODING) {
4802     return MemOperand(temp, ToRegister32(index), SXTW);
4803   } else {
4804     STATIC_ASSERT(kUC16Size == 2);
4805     return MemOperand(temp, ToRegister32(index), SXTW, 1);
4806   }
4807 }
4808 
4809 
4810 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4811   String::Encoding encoding = instr->hydrogen()->encoding();
4812   Register string = ToRegister(instr->string());
4813   Register result = ToRegister(instr->result());
4814   Register temp = ToRegister(instr->temp());
4815 
4816   if (FLAG_debug_code) {
4817     // Even though this lithium instruction comes with a temp register, we
4818     // can't use it here because we want to use "AtStart" constraints on the
4819     // inputs and the debug code here needs a scratch register.
4820     UseScratchRegisterScope temps(masm());
4821     Register dbg_temp = temps.AcquireX();
4822 
4823     __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
4824     __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
4825 
4826     __ And(dbg_temp, dbg_temp,
4827            Operand(kStringRepresentationMask | kStringEncodingMask));
4828     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4829     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4830     __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
4831                              ? one_byte_seq_type : two_byte_seq_type));
4832     __ Check(eq, kUnexpectedStringType);
4833   }
4834 
4835   MemOperand operand =
4836       BuildSeqStringOperand(string, temp, instr->index(), encoding);
4837   if (encoding == String::ONE_BYTE_ENCODING) {
4838     __ Ldrb(result, operand);
4839   } else {
4840     __ Ldrh(result, operand);
4841   }
4842 }
4843 
4844 
4845 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4846   String::Encoding encoding = instr->hydrogen()->encoding();
4847   Register string = ToRegister(instr->string());
4848   Register value = ToRegister(instr->value());
4849   Register temp = ToRegister(instr->temp());
4850 
4851   if (FLAG_debug_code) {
4852     ASSERT(ToRegister(instr->context()).is(cp));
4853     Register index = ToRegister(instr->index());
4854     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4855     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4856     int encoding_mask =
4857         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4858         ? one_byte_seq_type : two_byte_seq_type;
4859     __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4860                                  encoding_mask);
4861   }
4862   MemOperand operand =
4863       BuildSeqStringOperand(string, temp, instr->index(), encoding);
4864   if (encoding == String::ONE_BYTE_ENCODING) {
4865     __ Strb(value, operand);
4866   } else {
4867     __ Strh(value, operand);
4868   }
4869 }
4870 
4871 
4872 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4873   HChange* hchange = instr->hydrogen();
4874   Register input = ToRegister(instr->value());
4875   Register output = ToRegister(instr->result());
4876   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4877       hchange->value()->CheckFlag(HValue::kUint32)) {
4878     DeoptimizeIfNegative(input.W(), instr->environment());
4879   }
4880   __ SmiTag(output, input);
4881 }
4882 
4883 
4884 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4885   Register input = ToRegister(instr->value());
4886   Register result = ToRegister(instr->result());
4887   Label done, untag;
4888 
4889   if (instr->needs_check()) {
4890     DeoptimizeIfNotSmi(input, instr->environment());
4891   }
4892 
4893   __ Bind(&untag);
4894   __ SmiUntag(result, input);
4895   __ Bind(&done);
4896 }
4897 
4898 
4899 void LCodeGen::DoShiftI(LShiftI* instr) {
4900   LOperand* right_op = instr->right();
4901   Register left = ToRegister32(instr->left());
4902   Register result = ToRegister32(instr->result());
4903 
4904   if (right_op->IsRegister()) {
4905     Register right = ToRegister32(instr->right());
4906     switch (instr->op()) {
4907       case Token::ROR: __ Ror(result, left, right); break;
4908       case Token::SAR: __ Asr(result, left, right); break;
4909       case Token::SHL: __ Lsl(result, left, right); break;
4910       case Token::SHR:
4911         if (instr->can_deopt()) {
4912           Label right_not_zero;
4913           __ Cbnz(right, &right_not_zero);
4914           DeoptimizeIfNegative(left, instr->environment());
4915           __ Bind(&right_not_zero);
4916         }
4917         __ Lsr(result, left, right);
4918         break;
4919       default: UNREACHABLE();
4920     }
4921   } else {
4922     ASSERT(right_op->IsConstantOperand());
4923     int shift_count = JSShiftAmountFromLConstant(right_op);
4924     if (shift_count == 0) {
4925       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4926         DeoptimizeIfNegative(left, instr->environment());
4927       }
4928       __ Mov(result, left, kDiscardForSameWReg);
4929     } else {
4930       switch (instr->op()) {
4931         case Token::ROR: __ Ror(result, left, shift_count); break;
4932         case Token::SAR: __ Asr(result, left, shift_count); break;
4933         case Token::SHL: __ Lsl(result, left, shift_count); break;
4934         case Token::SHR: __ Lsr(result, left, shift_count); break;
4935         default: UNREACHABLE();
4936       }
4937     }
4938   }
4939 }
4940 
4941 
4942 void LCodeGen::DoShiftS(LShiftS* instr) {
4943   LOperand* right_op = instr->right();
4944   Register left = ToRegister(instr->left());
4945   Register result = ToRegister(instr->result());
4946 
4947   // Only ROR by register needs a temp.
4948   ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
4949          (instr->temp() == NULL));
4950 
4951   if (right_op->IsRegister()) {
4952     Register right = ToRegister(instr->right());
4953     switch (instr->op()) {
4954       case Token::ROR: {
4955         Register temp = ToRegister(instr->temp());
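        // Note added for clarity: the shift amount is a tagged smi, so its
        // untagged payload starts at bit kSmiShift; Ubfx extracts only five
        // of those bits because JS shift counts are taken modulo 32.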
4956         __ Ubfx(temp, right, kSmiShift, 5);
4957         __ SmiUntag(result, left);
4958         __ Ror(result.W(), result.W(), temp.W());
4959         __ SmiTag(result);
4960         break;
4961       }
4962       case Token::SAR:
4963         __ Ubfx(result, right, kSmiShift, 5);
4964         __ Asr(result, left, result);
4965         __ Bic(result, result, kSmiShiftMask);
4966         break;
4967       case Token::SHL:
4968         __ Ubfx(result, right, kSmiShift, 5);
4969         __ Lsl(result, left, result);
4970         break;
4971       case Token::SHR:
4972         if (instr->can_deopt()) {
4973           Label right_not_zero;
4974           __ Cbnz(right, &right_not_zero);
4975           DeoptimizeIfNegative(left, instr->environment());
4976           __ Bind(&right_not_zero);
4977         }
4978         __ Ubfx(result, right, kSmiShift, 5);
4979         __ Lsr(result, left, result);
4980         __ Bic(result, result, kSmiShiftMask);
4981         break;
4982       default: UNREACHABLE();
4983     }
4984   } else {
4985     ASSERT(right_op->IsConstantOperand());
4986     int shift_count = JSShiftAmountFromLConstant(right_op);
4987     if (shift_count == 0) {
4988       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4989         DeoptimizeIfNegative(left, instr->environment());
4990       }
4991       __ Mov(result, left);
4992     } else {
4993       switch (instr->op()) {
4994         case Token::ROR:
4995           __ SmiUntag(result, left);
4996           __ Ror(result.W(), result.W(), shift_count);
4997           __ SmiTag(result);
4998           break;
4999         case Token::SAR:
5000           __ Asr(result, left, shift_count);
5001           __ Bic(result, result, kSmiShiftMask);
5002           break;
5003         case Token::SHL:
5004           __ Lsl(result, left, shift_count);
5005           break;
5006         case Token::SHR:
5007           __ Lsr(result, left, shift_count);
5008           __ Bic(result, result, kSmiShiftMask);
5009           break;
5010         default: UNREACHABLE();
5011       }
5012     }
5013   }
5014 }
5015 
5016 
5017 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
5018   __ Debug("LDebugBreak", 0, BREAK);
5019 }
5020 
5021 
5022 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
5023   ASSERT(ToRegister(instr->context()).is(cp));
5024   Register scratch1 = x5;
5025   Register scratch2 = x6;
5026   ASSERT(instr->IsMarkedAsCall());
5027 
5028   ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
5029   // TODO(all): if Mov could handle objects in new space then it could be used
5030   // here.
5031   __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
5032   __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
5033   __ Push(cp, scratch1, scratch2);  // The context is the first argument.
5034   CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
5035 }
5036 
5037 
5038 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5039   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5040   LoadContextFromDeferred(instr->context());
5041   __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
5042   RecordSafepointWithLazyDeopt(
5043       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5044   ASSERT(instr->HasEnvironment());
5045   LEnvironment* env = instr->environment();
5046   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5047 }
5048 
5049 
5050 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5051   class DeferredStackCheck: public LDeferredCode {
5052    public:
5053     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5054         : LDeferredCode(codegen), instr_(instr) { }
5055     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5056     virtual LInstruction* instr() { return instr_; }
5057    private:
5058     LStackCheck* instr_;
5059   };
5060 
5061   ASSERT(instr->HasEnvironment());
5062   LEnvironment* env = instr->environment();
5063   // There is no LLazyBailout instruction for stack-checks. We have to
5064   // prepare for lazy deoptimization explicitly here.
5065   if (instr->hydrogen()->is_function_entry()) {
5066     // Perform stack overflow check.
5067     Label done;
5068     __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5069     __ B(hs, &done);
5070 
5071     PredictableCodeSizeScope predictable(masm_,
5072                                          Assembler::kCallSizeWithRelocation);
5073     ASSERT(instr->context()->IsRegister());
5074     ASSERT(ToRegister(instr->context()).is(cp));
5075     CallCode(isolate()->builtins()->StackCheck(),
5076              RelocInfo::CODE_TARGET,
5077              instr);
5078     __ Bind(&done);
5079   } else {
5080     ASSERT(instr->hydrogen()->is_backwards_branch());
5081     // Perform stack overflow check if this goto needs it before jumping.
5082     DeferredStackCheck* deferred_stack_check =
5083         new(zone()) DeferredStackCheck(this, instr);
5084     __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5085     __ B(lo, deferred_stack_check->entry());
5086 
5087     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5088     __ Bind(instr->done_label());
5089     deferred_stack_check->SetExit(instr->done_label());
5090     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5091     // Don't record a deoptimization index for the safepoint here.
5092     // This will be done explicitly when emitting call and the safepoint in
5093     // the deferred code.
5094   }
5095 }
5096 
5097 
5098 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5099   Register function = ToRegister(instr->function());
5100   Register code_object = ToRegister(instr->code_object());
5101   Register temp = ToRegister(instr->temp());
5102   __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
5103   __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5104 }
5105 
5106 
5107 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
5108   Register context = ToRegister(instr->context());
5109   Register value = ToRegister(instr->value());
5110   Register scratch = ToRegister(instr->temp());
5111   MemOperand target = ContextMemOperand(context, instr->slot_index());
5112 
5113   Label skip_assignment;
5114 
5115   if (instr->hydrogen()->RequiresHoleCheck()) {
5116     __ Ldr(scratch, target);
5117     if (instr->hydrogen()->DeoptimizesOnHole()) {
5118       DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
5119                        instr->environment());
5120     } else {
5121       __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5122     }
5123   }
5124 
5125   __ Str(value, target);
5126   if (instr->hydrogen()->NeedsWriteBarrier()) {
5127     SmiCheck check_needed =
5128         instr->hydrogen()->value()->type().IsHeapObject()
5129             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5130     __ RecordWriteContextSlot(context,
5131                               target.offset(),
5132                               value,
5133                               scratch,
5134                               GetLinkRegisterState(),
5135                               kSaveFPRegs,
5136                               EMIT_REMEMBERED_SET,
5137                               check_needed);
5138   }
5139   __ Bind(&skip_assignment);
5140 }
5141 
5142 
5143 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
5144   Register value = ToRegister(instr->value());
5145   Register cell = ToRegister(instr->temp1());
5146 
5147   // Load the cell.
5148   __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5149 
5150   // If the cell we are storing to contains the hole it could have
5151   // been deleted from the property dictionary. In that case, we need
5152   // to update the property details in the property dictionary to mark
5153   // it as no longer deleted. We deoptimize in that case.
5154   if (instr->hydrogen()->RequiresHoleCheck()) {
5155     Register payload = ToRegister(instr->temp2());
5156     __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
5157     DeoptimizeIfRoot(
5158         payload, Heap::kTheHoleValueRootIndex, instr->environment());
5159   }
5160 
5161   // Store the value.
5162   __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
5163   // Cells are always rescanned, so no write barrier here.
5164 }
5165 
5166 
5167 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5168   Register ext_ptr = ToRegister(instr->elements());
5169   Register key = no_reg;
5170   Register scratch;
5171   ElementsKind elements_kind = instr->elements_kind();
5172 
5173   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5174   bool key_is_constant = instr->key()->IsConstantOperand();
5175   int constant_key = 0;
5176   if (key_is_constant) {
5177     ASSERT(instr->temp() == NULL);
5178     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5179     if (constant_key & 0xf0000000) {
5180       Abort(kArrayIndexConstantValueTooBig);
5181     }
5182   } else {
5183     key = ToRegister(instr->key());
5184     scratch = ToRegister(instr->temp());
5185   }
5186 
5187   MemOperand dst =
5188     PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
5189                                      key_is_constant, constant_key,
5190                                      elements_kind,
5191                                      instr->base_offset());
5192 
5193   if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
5194       (elements_kind == FLOAT32_ELEMENTS)) {
5195     DoubleRegister value = ToDoubleRegister(instr->value());
5196     DoubleRegister dbl_scratch = double_scratch();
5197     __ Fcvt(dbl_scratch.S(), value);
5198     __ Str(dbl_scratch.S(), dst);
5199   } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
5200              (elements_kind == FLOAT64_ELEMENTS)) {
5201     DoubleRegister value = ToDoubleRegister(instr->value());
5202     __ Str(value, dst);
5203   } else {
5204     Register value = ToRegister(instr->value());
5205 
5206     switch (elements_kind) {
5207       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5208       case EXTERNAL_INT8_ELEMENTS:
5209       case EXTERNAL_UINT8_ELEMENTS:
5210       case UINT8_ELEMENTS:
5211       case UINT8_CLAMPED_ELEMENTS:
5212       case INT8_ELEMENTS:
5213         __ Strb(value, dst);
5214         break;
5215       case EXTERNAL_INT16_ELEMENTS:
5216       case EXTERNAL_UINT16_ELEMENTS:
5217       case INT16_ELEMENTS:
5218       case UINT16_ELEMENTS:
5219         __ Strh(value, dst);
5220         break;
5221       case EXTERNAL_INT32_ELEMENTS:
5222       case EXTERNAL_UINT32_ELEMENTS:
5223       case INT32_ELEMENTS:
5224       case UINT32_ELEMENTS:
5225         __ Str(value.W(), dst);
5226         break;
5227       case FLOAT32_ELEMENTS:
5228       case FLOAT64_ELEMENTS:
5229       case EXTERNAL_FLOAT32_ELEMENTS:
5230       case EXTERNAL_FLOAT64_ELEMENTS:
5231       case FAST_DOUBLE_ELEMENTS:
5232       case FAST_ELEMENTS:
5233       case FAST_SMI_ELEMENTS:
5234       case FAST_HOLEY_DOUBLE_ELEMENTS:
5235       case FAST_HOLEY_ELEMENTS:
5236       case FAST_HOLEY_SMI_ELEMENTS:
5237       case DICTIONARY_ELEMENTS:
5238       case SLOPPY_ARGUMENTS_ELEMENTS:
5239         UNREACHABLE();
5240         break;
5241     }
5242   }
5243 }
5244 
5245 
5246 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
5247   Register elements = ToRegister(instr->elements());
5248   DoubleRegister value = ToDoubleRegister(instr->value());
5249   MemOperand mem_op;
5250 
5251   if (instr->key()->IsConstantOperand()) {
5252     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5253     if (constant_key & 0xf0000000) {
5254       Abort(kArrayIndexConstantValueTooBig);
5255     }
5256     int offset = instr->base_offset() + constant_key * kDoubleSize;
5257     mem_op = MemOperand(elements, offset);
5258   } else {
5259     Register store_base = ToRegister(instr->temp());
5260     Register key = ToRegister(instr->key());
5261     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5262     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5263                                       instr->hydrogen()->elements_kind(),
5264                                       instr->hydrogen()->representation(),
5265                                       instr->base_offset());
5266   }
5267 
5268   if (instr->NeedsCanonicalization()) {
5269     __ CanonicalizeNaN(double_scratch(), value);
5270     __ Str(double_scratch(), mem_op);
5271   } else {
5272     __ Str(value, mem_op);
5273   }
5274 }
5275 
5276 
5277 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
5278   Register value = ToRegister(instr->value());
5279   Register elements = ToRegister(instr->elements());
5280   Register scratch = no_reg;
5281   Register store_base = no_reg;
5282   Register key = no_reg;
5283   MemOperand mem_op;
5284 
5285   if (!instr->key()->IsConstantOperand() ||
5286       instr->hydrogen()->NeedsWriteBarrier()) {
5287     scratch = ToRegister(instr->temp());
5288   }
5289 
5290   Representation representation = instr->hydrogen()->value()->representation();
5291   if (instr->key()->IsConstantOperand()) {
5292     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5293     int offset = instr->base_offset() +
5294         ToInteger32(const_operand) * kPointerSize;
5295     store_base = elements;
5296     if (representation.IsInteger32()) {
5297       ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5298       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
5299       STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
5300                     (kSmiTag == 0));
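      // Note added for clarity: a smi keeps its 32-bit payload in the upper
      // half of the word, so an untagged-smi operand lets the raw int32 value
      // be stored directly into the payload of the already-initialized entry.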
5301       mem_op = UntagSmiMemOperand(store_base, offset);
5302     } else {
5303       mem_op = MemOperand(store_base, offset);
5304     }
5305   } else {
5306     store_base = scratch;
5307     key = ToRegister(instr->key());
5308     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5309 
5310     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5311                                       instr->hydrogen()->elements_kind(),
5312                                       representation, instr->base_offset());
5313   }
5314 
5315   __ Store(value, mem_op, representation);
5316 
5317   if (instr->hydrogen()->NeedsWriteBarrier()) {
5318     ASSERT(representation.IsTagged());
5319     // This assignment may cause element_addr to alias store_base.
5320     Register element_addr = scratch;
5321     SmiCheck check_needed =
5322         instr->hydrogen()->value()->type().IsHeapObject()
5323             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5324     // Compute the address of the modified element into the scratch register.
5325     __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
5326     __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
5327                    kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
5328                    instr->hydrogen()->PointersToHereCheckForValue());
5329   }
5330 }
5331 
5332 
5333 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5334   ASSERT(ToRegister(instr->context()).is(cp));
5335   ASSERT(ToRegister(instr->object()).Is(x2));
5336   ASSERT(ToRegister(instr->key()).Is(x1));
5337   ASSERT(ToRegister(instr->value()).Is(x0));
5338 
5339   Handle<Code> ic = instr->strict_mode() == STRICT
5340       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5341       : isolate()->builtins()->KeyedStoreIC_Initialize();
5342   CallCode(ic, RelocInfo::CODE_TARGET, instr);
5343 }
5344 
5345 
5346 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5347   Representation representation = instr->representation();
5348 
5349   Register object = ToRegister(instr->object());
5350   HObjectAccess access = instr->hydrogen()->access();
5351   int offset = access.offset();
5352 
5353   if (access.IsExternalMemory()) {
5354     ASSERT(!instr->hydrogen()->has_transition());
5355     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5356     Register value = ToRegister(instr->value());
5357     __ Store(value, MemOperand(object, offset), representation);
5358     return;
5359   }
5360 
5361   __ AssertNotSmi(object);
5362 
5363   if (representation.IsDouble()) {
5364     ASSERT(access.IsInobject());
5365     ASSERT(!instr->hydrogen()->has_transition());
5366     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5367     FPRegister value = ToDoubleRegister(instr->value());
5368     __ Str(value, FieldMemOperand(object, offset));
5369     return;
5370   }
5371 
5372   Register value = ToRegister(instr->value());
5373 
5374   ASSERT(!representation.IsSmi() ||
5375          !instr->value()->IsConstantOperand() ||
5376          IsInteger32Constant(LConstantOperand::cast(instr->value())));
5377 
5378   if (instr->hydrogen()->has_transition()) {
5379     Handle<Map> transition = instr->hydrogen()->transition_map();
5380     AddDeprecationDependency(transition);
5381     // Store the new map value.
5382     Register new_map_value = ToRegister(instr->temp0());
5383     __ Mov(new_map_value, Operand(transition));
5384     __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
5385     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5386       // Update the write barrier for the map field.
5387       __ RecordWriteForMap(object,
5388                            new_map_value,
5389                            ToRegister(instr->temp1()),
5390                            GetLinkRegisterState(),
5391                            kSaveFPRegs);
5392     }
5393   }
5394 
5395   // Do the store.
5396   Register destination;
5397   if (access.IsInobject()) {
5398     destination = object;
5399   } else {
5400     Register temp0 = ToRegister(instr->temp0());
5401     __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5402     destination = temp0;
5403   }
5404 
5405   if (representation.IsSmi() &&
5406      instr->hydrogen()->value()->representation().IsInteger32()) {
5407     ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5408 #ifdef DEBUG
5409     Register temp0 = ToRegister(instr->temp0());
5410     __ Ldr(temp0, FieldMemOperand(destination, offset));
5411     __ AssertSmi(temp0);
5412     // If destination aliased temp0, restore it to the address calculated
5413     // earlier.
5414     if (destination.Is(temp0)) {
5415       ASSERT(!access.IsInobject());
5416       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
5417     }
5418 #endif
5419     STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
5420     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5421              Representation::Integer32());
5422   } else {
5423     __ Store(value, FieldMemOperand(destination, offset), representation);
5424   }
5425   if (instr->hydrogen()->NeedsWriteBarrier()) {
5426     __ RecordWriteField(destination,
5427                         offset,
5428                         value,                        // Clobbered.
5429                         ToRegister(instr->temp1()),   // Clobbered.
5430                         GetLinkRegisterState(),
5431                         kSaveFPRegs,
5432                         EMIT_REMEMBERED_SET,
5433                         instr->hydrogen()->SmiCheckForWriteBarrier(),
5434                         instr->hydrogen()->PointersToHereCheckForValue());
5435   }
5436 }
5437 
5438 
5439 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5440   ASSERT(ToRegister(instr->context()).is(cp));
5441   ASSERT(ToRegister(instr->value()).is(x0));
5442   ASSERT(ToRegister(instr->object()).is(x1));
5443 
5444   // Name must be in x2.
5445   __ Mov(x2, Operand(instr->name()));
5446   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
5447   CallCode(ic, RelocInfo::CODE_TARGET, instr);
5448 }
5449 
5450 
5451 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5452   ASSERT(ToRegister(instr->context()).is(cp));
5453   ASSERT(ToRegister(instr->left()).Is(x1));
5454   ASSERT(ToRegister(instr->right()).Is(x0));
5455   StringAddStub stub(isolate(),
5456                      instr->hydrogen()->flags(),
5457                      instr->hydrogen()->pretenure_flag());
5458   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5459 }
5460 
5461 
5462 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5463   class DeferredStringCharCodeAt: public LDeferredCode {
5464    public:
5465     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5466         : LDeferredCode(codegen), instr_(instr) { }
5467     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
5468     virtual LInstruction* instr() { return instr_; }
5469    private:
5470     LStringCharCodeAt* instr_;
5471   };
5472 
5473   DeferredStringCharCodeAt* deferred =
5474       new(zone()) DeferredStringCharCodeAt(this, instr);
5475 
5476   StringCharLoadGenerator::Generate(masm(),
5477                                     ToRegister(instr->string()),
5478                                     ToRegister32(instr->index()),
5479                                     ToRegister(instr->result()),
5480                                     deferred->entry());
5481   __ Bind(deferred->exit());
5482 }
5483 
5484 
5485 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5486   Register string = ToRegister(instr->string());
5487   Register result = ToRegister(instr->result());
5488 
5489   // TODO(3095996): Get rid of this. For now, we need to make the
5490   // result register contain a valid pointer because it is already
5491   // contained in the register pointer map.
5492   __ Mov(result, 0);
5493 
5494   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5495   __ Push(string);
5496   // Push the index as a smi. This is safe because of the checks in
5497   // DoStringCharCodeAt above.
5498   Register index = ToRegister(instr->index());
5499   __ SmiTagAndPush(index);
5500 
5501   CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
5502                           instr->context());
5503   __ AssertSmi(x0);
5504   __ SmiUntag(x0);
5505   __ StoreToSafepointRegisterSlot(x0, result);
5506 }
5507 
5508 
5509 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5510   class DeferredStringCharFromCode: public LDeferredCode {
5511    public:
5512     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5513         : LDeferredCode(codegen), instr_(instr) { }
5514     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5515     virtual LInstruction* instr() { return instr_; }
5516    private:
5517     LStringCharFromCode* instr_;
5518   };
5519 
5520   DeferredStringCharFromCode* deferred =
5521       new(zone()) DeferredStringCharFromCode(this, instr);
5522 
5523   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5524   Register char_code = ToRegister32(instr->char_code());
5525   Register result = ToRegister(instr->result());
5526 
5527   __ Cmp(char_code, String::kMaxOneByteCharCode);
5528   __ B(hi, deferred->entry());
5529   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5530   __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
5531   __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
5532   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5533   __ B(eq, deferred->entry());
5534   __ Bind(deferred->exit());
5535 }
5536 
5537 
5538 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5539   Register char_code = ToRegister(instr->char_code());
5540   Register result = ToRegister(instr->result());
5541 
5542   // TODO(3095996): Get rid of this. For now, we need to make the
5543   // result register contain a valid pointer because it is already
5544   // contained in the register pointer map.
5545   __ Mov(result, 0);
5546 
5547   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5548   __ SmiTagAndPush(char_code);
5549   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5550   __ StoreToSafepointRegisterSlot(x0, result);
5551 }
5552 
5553 
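// String comparison: call the compare IC and branch on its result in x0,
// compared against zero with the condition derived from the token.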
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


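// 32-bit integer subtraction. When the instruction can overflow, use the
// flag-setting Subs and deoptimize on overflow (DoSubS below is the
// full-width register variant).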
void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32I(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


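// Deferred path for TaggedToI: the input is a heap object. Heap numbers are
// converted with the truncating or non-truncating routine as appropriate;
// truncating conversions also accept true, false and undefined. Anything else
// deoptimizes.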
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  // Load heap object map.
  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to the boolean and undefined checks.
    __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);

    // A heap number: load value and convert to int32 using truncating function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output already contains zero; undefined is converted to zero for
    // truncating conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                        instr->environment());
  } else {
    Register output = ToRegister32(instr->result());

    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    // Deoptimize if it's not a heap number.
    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
                        instr->environment());

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr->environment());
    }
  }
  __ Bind(&done);
}


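// Convert a tagged value to an integer. Smis are untagged inline; other heap
// objects are handled by the deferred code (DoDeferredTaggedToI above).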
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }

    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).Is(x0));
  ASSERT(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create the regexp literal using a runtime function.
  // The result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


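// Transition an object's elements kind. Simple map changes just store the new
// map (with a write barrier); other transitions call TransitionElementsKindStub.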
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    ASSERT(object.is(x0));
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


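// Deoptimize when TestJSArrayForAllocationMemento finds an allocation memento
// behind the array; otherwise fall through.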
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


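// Branch on the result of comparing typeof(value) with a literal type name.
// Each supported type name gets its own inline check; unknown names branch
// straight to the false label.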
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    ASSERT(instr->temp1() != NULL);
    Register map = ToRegister(instr->temp1());

    __ JumpIfSmi(value, true_label);
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->string_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ CompareRoot(value, Heap::kNullValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    ASSERT(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    ASSERT(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    if (!FLAG_harmony_typeof) {
      __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    }
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


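// Deoptimize if the object's map does not match the expected map.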
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr->environment());
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr->environment());

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


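// Deferred path for DoLoadFieldByIndex: call the LoadMutableDouble runtime
// function with the object and (smi) index, then store the result into the
// safepoint slot of the result register.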
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


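// Load a field given its (smi) encoded index. The low bit of the index sends
// us to the deferred runtime path (mutable double fields). After shifting that
// bit out, a non-negative index addresses an in-object field and a negative
// index addresses the out-of-object properties array.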
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));

  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index, plus one.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}



} }  // namespace v8::internal