1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "arm/lithium-codegen-arm.h"
31 #include "arm/lithium-gap-resolver-arm.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34
35 namespace v8 {
36 namespace internal {
37
38
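// Note (editor, inferred from the code below): SafepointGenerator is a small
// CallWrapper helper handed to generated call sequences. Its AfterCall() hook
// records a safepoint for the supplied pointer map and deopt mode right after
// the call instruction, so such call sites get a safepoint entry automatically.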
39 class SafepointGenerator : public CallWrapper {
40 public:
41 SafepointGenerator(LCodeGen* codegen,
42 LPointerMap* pointers,
43 Safepoint::DeoptMode mode)
44 : codegen_(codegen),
45 pointers_(pointers),
46 deopt_mode_(mode) { }
47 virtual ~SafepointGenerator() { }
48
49 virtual void BeforeCall(int call_size) const { }
50
51 virtual void AfterCall() const {
52 codegen_->RecordSafepoint(pointers_, deopt_mode_);
53 }
54
55 private:
56 LCodeGen* codegen_;
57 LPointerMap* pointers_;
58 Safepoint::DeoptMode deopt_mode_;
59 };
60
61
62 #define __ masm()->
63
64 bool LCodeGen::GenerateCode() {
65 HPhase phase("Z_Code generation", chunk());
66 ASSERT(is_unused());
67 status_ = GENERATING;
68 CpuFeatures::Scope scope1(VFP3);
69 CpuFeatures::Scope scope2(ARMv7);
70
71 CodeStub::GenerateFPStubs();
72
73 // Open a frame scope to indicate that there is a frame on the stack. The
74 // NONE indicates that the scope shouldn't actually generate code to set up
75 // the frame (that is done in GeneratePrologue).
76 FrameScope frame_scope(masm_, StackFrame::NONE);
77
78 return GeneratePrologue() &&
79 GenerateBody() &&
80 GenerateDeferredCode() &&
81 GenerateDeoptJumpTable() &&
82 GenerateSafepointTable();
83 }
84
85
86 void LCodeGen::FinishCode(Handle<Code> code) {
87 ASSERT(is_done());
88 code->set_stack_slots(GetStackSlotCount());
89 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
90 PopulateDeoptimizationData(code);
91 }
92
93
94 void LCodeGen::Abort(const char* format, ...) {
95 if (FLAG_trace_bailout) {
96 SmartArrayPointer<char> name(
97 info()->shared_info()->DebugName()->ToCString());
98 PrintF("Aborting LCodeGen in @\"%s\": ", *name);
99 va_list arguments;
100 va_start(arguments, format);
101 OS::VPrint(format, arguments);
102 va_end(arguments);
103 PrintF("\n");
104 }
105 status_ = ABORTED;
106 }
107
108
109 void LCodeGen::Comment(const char* format, ...) {
110 if (!FLAG_code_comments) return;
111 char buffer[4 * KB];
112 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
113 va_list arguments;
114 va_start(arguments, format);
115 builder.AddFormattedList(format, arguments);
116 va_end(arguments);
117
118 // Copy the string before recording it in the assembler to avoid
119 // issues when the stack allocated buffer goes out of scope.
120 size_t length = builder.position();
121 Vector<char> copy = Vector<char>::New(length + 1);
122 memcpy(copy.start(), builder.Finalize(), copy.length());
123 masm()->RecordComment(copy.start());
124 }
125
126
127 bool LCodeGen::GeneratePrologue() {
128 ASSERT(is_generating());
129
130 #ifdef DEBUG
131 if (strlen(FLAG_stop_at) > 0 &&
132 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
133 __ stop("stop_at");
134 }
135 #endif
136
137 // r1: Callee's JS function.
138 // cp: Callee's context.
139 // fp: Caller's frame pointer.
140 // lr: Caller's pc.
141
142 // Strict mode functions and builtins need to replace the receiver
143 // with undefined when called as functions (without an explicit
144 // receiver object). r5 is zero for method calls and non-zero for
145 // function calls.
146 if (!info_->is_classic_mode() || info_->is_native()) {
147 Label ok;
148 __ cmp(r5, Operand(0));
149 __ b(eq, &ok);
150 int receiver_offset = scope()->num_parameters() * kPointerSize;
151 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
152 __ str(r2, MemOperand(sp, receiver_offset));
153 __ bind(&ok);
154 }
155
156 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
157 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
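// Sketch of the fixed frame after the stm/add above (higher addresses up),
// as implied by the stm register order; offsets are relative to fp:
//   fp + 4 : lr (caller's pc)
//   fp + 0 : caller's fp
//   fp - 4 : cp (context)
//   fp - 8 : r1 (JS function)
// Spill slots reserved below are addressed off fp (see ToMemOperand).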
158
159 // Reserve space for the stack slots needed by the code.
160 int slots = GetStackSlotCount();
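// In debug builds each reserved slot is filled with kSlotsZapValue one push
// at a time, which makes reads of uninitialized spill slots easy to spot;
// release builds simply bump the stack pointer in a single subtraction.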
161 if (slots > 0) {
162 if (FLAG_debug_code) {
163 __ mov(r0, Operand(slots));
164 __ mov(r2, Operand(kSlotsZapValue));
165 Label loop;
166 __ bind(&loop);
167 __ push(r2);
168 __ sub(r0, r0, Operand(1), SetCC);
169 __ b(ne, &loop);
170 } else {
171 __ sub(sp, sp, Operand(slots * kPointerSize));
172 }
173 }
174
175 // Possibly allocate a local context.
176 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
177 if (heap_slots > 0) {
178 Comment(";;; Allocate local context");
179 // Argument to NewContext is the function, which is in r1.
180 __ push(r1);
181 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
182 FastNewContextStub stub(heap_slots);
183 __ CallStub(&stub);
184 } else {
185 __ CallRuntime(Runtime::kNewFunctionContext, 1);
186 }
187 RecordSafepoint(Safepoint::kNoLazyDeopt);
188 // Context is returned in both r0 and cp. It replaces the context
189 // passed to us. It's saved in the stack and kept live in cp.
190 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
191 // Copy any necessary parameters into the context.
192 int num_parameters = scope()->num_parameters();
193 for (int i = 0; i < num_parameters; i++) {
194 Variable* var = scope()->parameter(i);
195 if (var->IsContextSlot()) {
196 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
197 (num_parameters - 1 - i) * kPointerSize;
198 // Load parameter from stack.
199 __ ldr(r0, MemOperand(fp, parameter_offset));
200 // Store it in the context.
201 MemOperand target = ContextOperand(cp, var->index());
202 __ str(r0, target);
203 // Update the write barrier. This clobbers r3 and r0.
204 __ RecordWriteContextSlot(
205 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
206 }
207 }
208 Comment(";;; End allocate local context");
209 }
210
211 // Trace the call.
212 if (FLAG_trace) {
213 __ CallRuntime(Runtime::kTraceEnter, 0);
214 }
215 return !is_aborted();
216 }
217
218
219 bool LCodeGen::GenerateBody() {
220 ASSERT(is_generating());
221 bool emit_instructions = true;
222 for (current_instruction_ = 0;
223 !is_aborted() && current_instruction_ < instructions_->length();
224 current_instruction_++) {
225 LInstruction* instr = instructions_->at(current_instruction_);
226 if (instr->IsLabel()) {
227 LLabel* label = LLabel::cast(instr);
228 emit_instructions = !label->HasReplacement();
229 }
230
231 if (emit_instructions) {
232 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
233 instr->CompileToNative(this);
234 }
235 }
236 EnsureSpaceForLazyDeopt();
237 return !is_aborted();
238 }
239
240
241 bool LCodeGen::GenerateDeferredCode() {
242 ASSERT(is_generating());
243 if (deferred_.length() > 0) {
244 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
245 LDeferredCode* code = deferred_[i];
246 __ bind(code->entry());
247 Comment(";;; Deferred code @%d: %s.",
248 code->instruction_index(),
249 code->instr()->Mnemonic());
250 code->Generate();
251 __ jmp(code->exit());
252 }
253 }
254
255 // Force constant pool emission at the end of the deferred code to make
256 // sure that no constant pools are emitted after.
257 masm()->CheckConstPool(true, false);
258
259 return !is_aborted();
260 }
261
262
263 bool LCodeGen::GenerateDeoptJumpTable() {
264 // Check that the jump table is accessible from everywhere in the function
265 // code, i.e. that offsets to the table can be encoded in the 24bit signed
266 // immediate of a branch instruction.
267 // To simplify we consider the code size from the first instruction to the
268 // end of the jump table. We also don't consider the pc load delta.
269 // Each entry in the jump table generates one instruction and inlines one
270 // 32bit data after it.
271 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
272 deopt_jump_table_.length() * 2)) {
273 Abort("Generated code is too large");
274 }
275
276 // Block the constant pool emission during the jump table emission.
277 __ BlockConstPoolFor(deopt_jump_table_.length());
278 __ RecordComment("[ Deoptimisation jump table");
279 Label table_start;
280 __ bind(&table_start);
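// Each table entry below is a single 'ldr pc, [pc, #offset]' followed by the
// 32-bit deoptimization entry address emitted with dd(). The offset
// (kInstrSize - kPcLoadDelta) compensates for the ARM pc reading ahead, so
// the load fetches exactly the word that follows the instruction.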
281 for (int i = 0; i < deopt_jump_table_.length(); i++) {
282 __ bind(&deopt_jump_table_[i].label);
283 __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
284 __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
285 }
286 ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
287 deopt_jump_table_.length() * 2);
288 __ RecordComment("]");
289
290 // The deoptimization jump table is the last part of the instruction
291 // sequence. Mark the generated code as done unless we bailed out.
292 if (!is_aborted()) status_ = DONE;
293 return !is_aborted();
294 }
295
296
297 bool LCodeGen::GenerateSafepointTable() {
298 ASSERT(is_done());
299 safepoints_.Emit(masm(), GetStackSlotCount());
300 return !is_aborted();
301 }
302
303
304 Register LCodeGen::ToRegister(int index) const {
305 return Register::FromAllocationIndex(index);
306 }
307
308
309 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
310 return DoubleRegister::FromAllocationIndex(index);
311 }
312
313
314 Register LCodeGen::ToRegister(LOperand* op) const {
315 ASSERT(op->IsRegister());
316 return ToRegister(op->index());
317 }
318
319
320 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
321 if (op->IsRegister()) {
322 return ToRegister(op->index());
323 } else if (op->IsConstantOperand()) {
324 LConstantOperand* const_op = LConstantOperand::cast(op);
325 Handle<Object> literal = chunk_->LookupLiteral(const_op);
326 Representation r = chunk_->LookupLiteralRepresentation(const_op);
327 if (r.IsInteger32()) {
328 ASSERT(literal->IsNumber());
329 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
330 } else if (r.IsDouble()) {
331 Abort("EmitLoadRegister: Unsupported double immediate.");
332 } else {
333 ASSERT(r.IsTagged());
334 if (literal->IsSmi()) {
335 __ mov(scratch, Operand(literal));
336 } else {
337 __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
338 }
339 }
340 return scratch;
341 } else if (op->IsStackSlot() || op->IsArgument()) {
342 __ ldr(scratch, ToMemOperand(op));
343 return scratch;
344 }
345 UNREACHABLE();
346 return scratch;
347 }
348
349
350 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
351 ASSERT(op->IsDoubleRegister());
352 return ToDoubleRegister(op->index());
353 }
354
355
356 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
357 SwVfpRegister flt_scratch,
358 DoubleRegister dbl_scratch) {
359 if (op->IsDoubleRegister()) {
360 return ToDoubleRegister(op->index());
361 } else if (op->IsConstantOperand()) {
362 LConstantOperand* const_op = LConstantOperand::cast(op);
363 Handle<Object> literal = chunk_->LookupLiteral(const_op);
364 Representation r = chunk_->LookupLiteralRepresentation(const_op);
365 if (r.IsInteger32()) {
366 ASSERT(literal->IsNumber());
367 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
368 __ vmov(flt_scratch, ip);
369 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
370 return dbl_scratch;
371 } else if (r.IsDouble()) {
372 Abort("unsupported double immediate");
373 } else if (r.IsTagged()) {
374 Abort("unsupported tagged immediate");
375 }
376 } else if (op->IsStackSlot() || op->IsArgument()) {
377 // TODO(regis): Why is vldr not taking a MemOperand?
378 // __ vldr(dbl_scratch, ToMemOperand(op));
379 MemOperand mem_op = ToMemOperand(op);
380 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
381 return dbl_scratch;
382 }
383 UNREACHABLE();
384 return dbl_scratch;
385 }
386
387
388 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
389 Handle<Object> literal = chunk_->LookupLiteral(op);
390 ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
391 return literal;
392 }
393
394
395 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
396 return chunk_->LookupLiteralRepresentation(op).IsInteger32();
397 }
398
399
400 int LCodeGen::ToInteger32(LConstantOperand* op) const {
401 Handle<Object> value = chunk_->LookupLiteral(op);
402 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
403 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
404 value->Number());
405 return static_cast<int32_t>(value->Number());
406 }
407
408
409 double LCodeGen::ToDouble(LConstantOperand* op) const {
410 Handle<Object> value = chunk_->LookupLiteral(op);
411 return value->Number();
412 }
413
414
415 Operand LCodeGen::ToOperand(LOperand* op) {
416 if (op->IsConstantOperand()) {
417 LConstantOperand* const_op = LConstantOperand::cast(op);
418 Handle<Object> literal = chunk_->LookupLiteral(const_op);
419 Representation r = chunk_->LookupLiteralRepresentation(const_op);
420 if (r.IsInteger32()) {
421 ASSERT(literal->IsNumber());
422 return Operand(static_cast<int32_t>(literal->Number()));
423 } else if (r.IsDouble()) {
424 Abort("ToOperand Unsupported double immediate.");
425 }
426 ASSERT(r.IsTagged());
427 return Operand(literal);
428 } else if (op->IsRegister()) {
429 return Operand(ToRegister(op));
430 } else if (op->IsDoubleRegister()) {
431 Abort("ToOperand IsDoubleRegister unimplemented");
432 return Operand(0);
433 }
434 // Stack slots not implemented, use ToMemOperand instead.
435 UNREACHABLE();
436 return Operand(0);
437 }
438
439
440 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
441 ASSERT(!op->IsRegister());
442 ASSERT(!op->IsDoubleRegister());
443 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
444 int index = op->index();
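// Worked example of the offset arithmetic below: non-negative indices are
// spill slots just below the fixed part of the frame, so index 0 maps to
// MemOperand(fp, -3 * kPointerSize). Negative indices are incoming
// parameters above fp, where -(index - 1) also skips the saved return
// address; index -1 maps to MemOperand(fp, +2 * kPointerSize).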
445 if (index >= 0) {
446 // Local or spill slot. Skip the frame pointer, function, and
447 // context in the fixed part of the frame.
448 return MemOperand(fp, -(index + 3) * kPointerSize);
449 } else {
450 // Incoming parameter. Skip the return address.
451 return MemOperand(fp, -(index - 1) * kPointerSize);
452 }
453 }
454
455
456 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
457 ASSERT(op->IsDoubleStackSlot());
458 int index = op->index();
459 if (index >= 0) {
460 // Local or spill slot. Skip the frame pointer, function, context,
461 // and the first word of the double in the fixed part of the frame.
462 return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
463 } else {
464 // Incoming parameter. Skip the return address and the first word of
465 // the double.
466 return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
467 }
468 }
469
470
471 void LCodeGen::WriteTranslation(LEnvironment* environment,
472 Translation* translation) {
473 if (environment == NULL) return;
474
475 // The translation includes one command per value in the environment.
476 int translation_size = environment->values()->length();
477 // The output frame height does not include the parameters.
478 int height = translation_size - environment->parameter_count();
479
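// Write the outer (caller) environments first, so that for inlined calls the
// translation lists frames from outermost to innermost before this frame's
// own commands are appended below.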
480 WriteTranslation(environment->outer(), translation);
481 int closure_id = DefineDeoptimizationLiteral(environment->closure());
482 switch (environment->frame_type()) {
483 case JS_FUNCTION:
484 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
485 break;
486 case JS_CONSTRUCT:
487 translation->BeginConstructStubFrame(closure_id, translation_size);
488 break;
489 case ARGUMENTS_ADAPTOR:
490 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
491 break;
492 default:
493 UNREACHABLE();
494 }
495 for (int i = 0; i < translation_size; ++i) {
496 LOperand* value = environment->values()->at(i);
497 // spilled_registers_ and spilled_double_registers_ are either
498 // both NULL or both set.
499 if (environment->spilled_registers() != NULL && value != NULL) {
500 if (value->IsRegister() &&
501 environment->spilled_registers()[value->index()] != NULL) {
502 translation->MarkDuplicate();
503 AddToTranslation(translation,
504 environment->spilled_registers()[value->index()],
505 environment->HasTaggedValueAt(i));
506 } else if (
507 value->IsDoubleRegister() &&
508 environment->spilled_double_registers()[value->index()] != NULL) {
509 translation->MarkDuplicate();
510 AddToTranslation(
511 translation,
512 environment->spilled_double_registers()[value->index()],
513 false);
514 }
515 }
516
517 AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
518 }
519 }
520
521
522 void LCodeGen::AddToTranslation(Translation* translation,
523 LOperand* op,
524 bool is_tagged) {
525 if (op == NULL) {
526 // TODO(twuerthinger): Introduce marker operands to indicate that this value
527 // is not present and must be reconstructed from the deoptimizer. Currently
528 // this is only used for the arguments object.
529 translation->StoreArgumentsObject();
530 } else if (op->IsStackSlot()) {
531 if (is_tagged) {
532 translation->StoreStackSlot(op->index());
533 } else {
534 translation->StoreInt32StackSlot(op->index());
535 }
536 } else if (op->IsDoubleStackSlot()) {
537 translation->StoreDoubleStackSlot(op->index());
538 } else if (op->IsArgument()) {
539 ASSERT(is_tagged);
540 int src_index = GetStackSlotCount() + op->index();
541 translation->StoreStackSlot(src_index);
542 } else if (op->IsRegister()) {
543 Register reg = ToRegister(op);
544 if (is_tagged) {
545 translation->StoreRegister(reg);
546 } else {
547 translation->StoreInt32Register(reg);
548 }
549 } else if (op->IsDoubleRegister()) {
550 DoubleRegister reg = ToDoubleRegister(op);
551 translation->StoreDoubleRegister(reg);
552 } else if (op->IsConstantOperand()) {
553 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
554 int src_index = DefineDeoptimizationLiteral(literal);
555 translation->StoreLiteral(src_index);
556 } else {
557 UNREACHABLE();
558 }
559 }
560
561
562 void LCodeGen::CallCode(Handle<Code> code,
563 RelocInfo::Mode mode,
564 LInstruction* instr) {
565 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
566 }
567
568
569 void LCodeGen::CallCodeGeneric(Handle<Code> code,
570 RelocInfo::Mode mode,
571 LInstruction* instr,
572 SafepointMode safepoint_mode) {
573 ASSERT(instr != NULL);
574 LPointerMap* pointers = instr->pointer_map();
575 RecordPosition(pointers->position());
576 __ Call(code, mode);
577 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
578
579 // Signal that we don't inline smi code before these stubs in the
580 // optimizing code generator.
581 if (code->kind() == Code::BINARY_OP_IC ||
582 code->kind() == Code::COMPARE_IC) {
583 __ nop();
584 }
585 }
586
587
588 void LCodeGen::CallRuntime(const Runtime::Function* function,
589 int num_arguments,
590 LInstruction* instr) {
591 ASSERT(instr != NULL);
592 LPointerMap* pointers = instr->pointer_map();
593 ASSERT(pointers != NULL);
594 RecordPosition(pointers->position());
595
596 __ CallRuntime(function, num_arguments);
597 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
598 }
599
600
601 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
602 int argc,
603 LInstruction* instr) {
604 __ CallRuntimeSaveDoubles(id);
605 RecordSafepointWithRegisters(
606 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
607 }
608
609
610 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
611 Safepoint::DeoptMode mode) {
612 if (!environment->HasBeenRegistered()) {
613 // Physical stack frame layout:
614 // -x ............. -4 0 ..................................... y
615 // [incoming arguments] [spill slots] [pushed outgoing arguments]
616
617 // Layout of the environment:
618 // 0 ..................................................... size-1
619 // [parameters] [locals] [expression stack including arguments]
620
621 // Layout of the translation:
622 // 0 ........................................................ size - 1 + 4
623 // [expression stack including arguments] [locals] [4 words] [parameters]
624 // |>------------ translation_size ------------<|
625
626 int frame_count = 0;
627 int jsframe_count = 0;
628 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
629 ++frame_count;
630 if (e->frame_type() == JS_FUNCTION) {
631 ++jsframe_count;
632 }
633 }
634 Translation translation(&translations_, frame_count, jsframe_count);
635 WriteTranslation(environment, &translation);
636 int deoptimization_index = deoptimizations_.length();
637 int pc_offset = masm()->pc_offset();
638 environment->Register(deoptimization_index,
639 translation.index(),
640 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
641 deoptimizations_.Add(environment);
642 }
643 }
644
645
646 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
647 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
648 ASSERT(environment->HasBeenRegistered());
649 int id = environment->deoptimization_index();
650 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
651 if (entry == NULL) {
652 Abort("bailout was not prepared");
653 return;
654 }
655
656 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
657
658 if (FLAG_deopt_every_n_times == 1 &&
659 info_->shared_info()->opt_count() == id) {
660 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
661 return;
662 }
663
664 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
665
666 if (cc == al) {
667 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
668 } else {
669 // We often have several deopts to the same entry; reuse the last
670 // jump table entry if this is the case.
671 if (deopt_jump_table_.is_empty() ||
672 (deopt_jump_table_.last().address != entry)) {
673 deopt_jump_table_.Add(JumpTableEntry(entry));
674 }
675 __ b(cc, &deopt_jump_table_.last().label);
676 }
677 }
678
679
680 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
681 int length = deoptimizations_.length();
682 if (length == 0) return;
683 Handle<DeoptimizationInputData> data =
684 factory()->NewDeoptimizationInputData(length, TENURED);
685
686 Handle<ByteArray> translations = translations_.CreateByteArray();
687 data->SetTranslationByteArray(*translations);
688 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
689
690 Handle<FixedArray> literals =
691 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
692 for (int i = 0; i < deoptimization_literals_.length(); i++) {
693 literals->set(i, *deoptimization_literals_[i]);
694 }
695 data->SetLiteralArray(*literals);
696
697 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
698 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
699
700 // Populate the deoptimization entries.
701 for (int i = 0; i < length; i++) {
702 LEnvironment* env = deoptimizations_[i];
703 data->SetAstId(i, Smi::FromInt(env->ast_id()));
704 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
705 data->SetArgumentsStackHeight(i,
706 Smi::FromInt(env->arguments_stack_height()));
707 data->SetPc(i, Smi::FromInt(env->pc_offset()));
708 }
709 code->set_deoptimization_data(*data);
710 }
711
712
713 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
714 int result = deoptimization_literals_.length();
715 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
716 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
717 }
718 deoptimization_literals_.Add(literal);
719 return result;
720 }
721
722
723 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
724 ASSERT(deoptimization_literals_.length() == 0);
725
726 const ZoneList<Handle<JSFunction> >* inlined_closures =
727 chunk()->inlined_closures();
728
729 for (int i = 0, length = inlined_closures->length();
730 i < length;
731 i++) {
732 DefineDeoptimizationLiteral(inlined_closures->at(i));
733 }
734
735 inlined_function_count_ = deoptimization_literals_.length();
736 }
737
738
739 void LCodeGen::RecordSafepointWithLazyDeopt(
740 LInstruction* instr, SafepointMode safepoint_mode) {
741 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
742 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
743 } else {
744 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
745 RecordSafepointWithRegisters(
746 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
747 }
748 }
749
750
751 void LCodeGen::RecordSafepoint(
752 LPointerMap* pointers,
753 Safepoint::Kind kind,
754 int arguments,
755 Safepoint::DeoptMode deopt_mode) {
756 ASSERT(expected_safepoint_kind_ == kind);
757
758 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
759 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
760 kind, arguments, deopt_mode);
761 for (int i = 0; i < operands->length(); i++) {
762 LOperand* pointer = operands->at(i);
763 if (pointer->IsStackSlot()) {
764 safepoint.DefinePointerSlot(pointer->index());
765 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
766 safepoint.DefinePointerRegister(ToRegister(pointer));
767 }
768 }
769 if (kind & Safepoint::kWithRegisters) {
770 // Register cp always contains a pointer to the context.
771 safepoint.DefinePointerRegister(cp);
772 }
773 }
774
775
776 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
777 Safepoint::DeoptMode deopt_mode) {
778 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
779 }
780
781
782 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
783 LPointerMap empty_pointers(RelocInfo::kNoPosition);
784 RecordSafepoint(&empty_pointers, deopt_mode);
785 }
786
787
788 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
789 int arguments,
790 Safepoint::DeoptMode deopt_mode) {
791 RecordSafepoint(
792 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
793 }
794
795
796 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
797 LPointerMap* pointers,
798 int arguments,
799 Safepoint::DeoptMode deopt_mode) {
800 RecordSafepoint(
801 pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
802 }
803
804
805 void LCodeGen::RecordPosition(int position) {
806 if (position == RelocInfo::kNoPosition) return;
807 masm()->positions_recorder()->RecordPosition(position);
808 }
809
810
811 void LCodeGen::DoLabel(LLabel* label) {
812 if (label->is_loop_header()) {
813 Comment(";;; B%d - LOOP entry", label->block_id());
814 } else {
815 Comment(";;; B%d", label->block_id());
816 }
817 __ bind(label->label());
818 current_block_ = label->block_id();
819 DoGap(label);
820 }
821
822
823 void LCodeGen::DoParallelMove(LParallelMove* move) {
824 resolver_.Resolve(move);
825 }
826
827
828 void LCodeGen::DoGap(LGap* gap) {
829 for (int i = LGap::FIRST_INNER_POSITION;
830 i <= LGap::LAST_INNER_POSITION;
831 i++) {
832 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
833 LParallelMove* move = gap->GetParallelMove(inner_pos);
834 if (move != NULL) DoParallelMove(move);
835 }
836 }
837
838
839 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
840 DoGap(instr);
841 }
842
843
844 void LCodeGen::DoParameter(LParameter* instr) {
845 // Nothing to do.
846 }
847
848
849 void LCodeGen::DoCallStub(LCallStub* instr) {
850 ASSERT(ToRegister(instr->result()).is(r0));
851 switch (instr->hydrogen()->major_key()) {
852 case CodeStub::RegExpConstructResult: {
853 RegExpConstructResultStub stub;
854 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
855 break;
856 }
857 case CodeStub::RegExpExec: {
858 RegExpExecStub stub;
859 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
860 break;
861 }
862 case CodeStub::SubString: {
863 SubStringStub stub;
864 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
865 break;
866 }
867 case CodeStub::NumberToString: {
868 NumberToStringStub stub;
869 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
870 break;
871 }
872 case CodeStub::StringAdd: {
873 StringAddStub stub(NO_STRING_ADD_FLAGS);
874 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
875 break;
876 }
877 case CodeStub::StringCompare: {
878 StringCompareStub stub;
879 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
880 break;
881 }
882 case CodeStub::TranscendentalCache: {
883 __ ldr(r0, MemOperand(sp, 0));
884 TranscendentalCacheStub stub(instr->transcendental_type(),
885 TranscendentalCacheStub::TAGGED);
886 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
887 break;
888 }
889 default:
890 UNREACHABLE();
891 }
892 }
893
894
895 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
896 // Nothing to do.
897 }
898
899
900 void LCodeGen::DoModI(LModI* instr) {
901 if (instr->hydrogen()->HasPowerOf2Divisor()) {
902 Register dividend = ToRegister(instr->InputAt(0));
903 Register result = ToRegister(instr->result());
904
905 int32_t divisor =
906 HConstant::cast(instr->hydrogen()->right())->Integer32Value();
907
908 if (divisor < 0) divisor = -divisor;
909
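// |divisor| is a power of two here, so the modulus can be computed with a
// mask: for a non-negative dividend, x % divisor == x & (divisor - 1). A
// negative dividend is negated first and the sign of the result restored
// afterwards, deoptimizing when the result would be -0 and that matters.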
910 Label positive_dividend, done;
911 __ cmp(dividend, Operand(0));
912 __ b(pl, &positive_dividend);
913 __ rsb(result, dividend, Operand(0));
914 __ and_(result, result, Operand(divisor - 1), SetCC);
915 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
916 DeoptimizeIf(eq, instr->environment());
917 }
918 __ rsb(result, result, Operand(0));
919 __ b(&done);
920 __ bind(&positive_dividend);
921 __ and_(result, dividend, Operand(divisor - 1));
922 __ bind(&done);
923 return;
924 }
925
926 // These registers hold untagged 32 bit values.
927 Register left = ToRegister(instr->InputAt(0));
928 Register right = ToRegister(instr->InputAt(1));
929 Register result = ToRegister(instr->result());
930
931 Register scratch = scratch0();
932 Register scratch2 = ToRegister(instr->TempAt(0));
933 DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
934 DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
935 DwVfpRegister quotient = double_scratch0();
936
937 ASSERT(!dividend.is(divisor));
938 ASSERT(!dividend.is(quotient));
939 ASSERT(!divisor.is(quotient));
940 ASSERT(!scratch.is(left));
941 ASSERT(!scratch.is(right));
942 ASSERT(!scratch.is(result));
943
944 Label done, vfp_modulo, both_positive, right_negative;
945
946 // Check for x % 0.
947 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
948 __ cmp(right, Operand(0));
949 DeoptimizeIf(eq, instr->environment());
950 }
951
952 __ Move(result, left);
953
954 // (0 % x) must yield 0 (if x is finite, which is the case here).
955 __ cmp(left, Operand(0));
956 __ b(eq, &done);
957 // Preload right in a vfp register.
958 __ vmov(divisor.low(), right);
959 __ b(lt, &vfp_modulo);
960
961 __ cmp(left, Operand(right));
962 __ b(lt, &done);
963
964 // Check for (positive) power of two on the right hand side.
965 __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
966 scratch,
967 &right_negative,
968 &both_positive);
969 // Perform modulo operation (scratch contains right - 1).
970 __ and_(result, scratch, Operand(left));
971 __ b(&done);
972
973 __ bind(&right_negative);
974 // Negate right. The sign of the divisor does not matter.
975 __ rsb(right, right, Operand(0));
976
977 __ bind(&both_positive);
978 const int kUnfolds = 3;
979 // If the right hand side is smaller than the (nonnegative)
980 // left hand side, the left hand side is the result.
981 // Else try a few subtractions of the left hand side.
982 __ mov(scratch, left);
983 for (int i = 0; i < kUnfolds; i++) {
984 // Check if the left hand side is less than or equal to
985 // the right hand side.
986 __ cmp(scratch, Operand(right));
987 __ mov(result, scratch, LeaveCC, lt);
988 __ b(lt, &done);
989 // If not, reduce the left hand side by the right hand
990 // side and check again.
991 if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
992 }
993
994 __ bind(&vfp_modulo);
995 // Load the arguments in VFP registers.
996 // The divisor value was preloaded above. Be careful that 'right' is only live
997 // on entry.
998 __ vmov(dividend.low(), left);
999 // From here on don't use right as it may have been reallocated (for example
1000 // to scratch2).
1001 right = no_reg;
1002
1003 __ vcvt_f64_s32(dividend, dividend.low());
1004 __ vcvt_f64_s32(divisor, divisor.low());
1005
1006 // We do not care about the sign of the divisor.
1007 __ vabs(divisor, divisor);
1008 // Compute the quotient and round it to a 32bit integer.
1009 __ vdiv(quotient, dividend, divisor);
1010 __ vcvt_s32_f64(quotient.low(), quotient);
1011 __ vcvt_f64_s32(quotient, quotient.low());
1012
1013 // Compute the remainder in result.
1014 DwVfpRegister double_scratch = dividend;
1015 __ vmul(double_scratch, divisor, quotient);
1016 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1017 __ vmov(scratch, double_scratch.low());
1018
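// At this point scratch holds the quotient (rounded to a 32-bit integer as
// above) multiplied by |divisor| and converted back to an integer, so
// left - scratch gives the remainder computed below.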
1019 if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1020 __ sub(result, left, scratch);
1021 } else {
1022 Label ok;
1023 // Check for -0.
1024 __ sub(scratch2, left, scratch, SetCC);
1025 __ b(ne, &ok);
1026 __ cmp(left, Operand(0));
1027 DeoptimizeIf(mi, instr->environment());
1028 __ bind(&ok);
1029 // Load the result and we are done.
1030 __ mov(result, scratch2);
1031 }
1032
1033 __ bind(&done);
1034 }
1035
1036
1037 void LCodeGen::DoDivI(LDivI* instr) {
1038 class DeferredDivI: public LDeferredCode {
1039 public:
1040 DeferredDivI(LCodeGen* codegen, LDivI* instr)
1041 : LDeferredCode(codegen), instr_(instr) { }
1042 virtual void Generate() {
1043 codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
1044 }
1045 virtual LInstruction* instr() { return instr_; }
1046 private:
1047 LDivI* instr_;
1048 };
1049
1050 const Register left = ToRegister(instr->InputAt(0));
1051 const Register right = ToRegister(instr->InputAt(1));
1052 const Register scratch = scratch0();
1053 const Register result = ToRegister(instr->result());
1054
1055 // Check for x / 0.
1056 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1057 __ cmp(right, Operand(0));
1058 DeoptimizeIf(eq, instr->environment());
1059 }
1060
1061 // Check for (0 / -x) that will produce negative zero.
1062 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1063 Label left_not_zero;
1064 __ cmp(left, Operand(0));
1065 __ b(ne, &left_not_zero);
1066 __ cmp(right, Operand(0));
1067 DeoptimizeIf(mi, instr->environment());
1068 __ bind(&left_not_zero);
1069 }
1070
1071 // Check for (-kMinInt / -1).
1072 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1073 Label left_not_min_int;
1074 __ cmp(left, Operand(kMinInt));
1075 __ b(ne, &left_not_min_int);
1076 __ cmp(right, Operand(-1));
1077 DeoptimizeIf(eq, instr->environment());
1078 __ bind(&left_not_min_int);
1079 }
1080
1081 Label done, deoptimize;
1082 // Test for a few common cases first.
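// Divisions by 1, 2 and 4 are handled inline; for 2 and 4 the conditional
// tst checks that the dividend is exactly divisible (low bits clear) before
// using an arithmetic shift. Everything else falls through to the deferred
// call of the generic binary-op stub below.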
1083 __ cmp(right, Operand(1));
1084 __ mov(result, left, LeaveCC, eq);
1085 __ b(eq, &done);
1086
1087 __ cmp(right, Operand(2));
1088 __ tst(left, Operand(1), eq);
1089 __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
1090 __ b(eq, &done);
1091
1092 __ cmp(right, Operand(4));
1093 __ tst(left, Operand(3), eq);
1094 __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
1095 __ b(eq, &done);
1096
1097 // Call the stub. The numbers in r0 and r1 have
1098 // to be tagged as Smis. If that is not possible, deoptimize.
1099 DeferredDivI* deferred = new DeferredDivI(this, instr);
1100
1101 __ TrySmiTag(left, &deoptimize, scratch);
1102 __ TrySmiTag(right, &deoptimize, scratch);
1103
1104 __ b(al, deferred->entry());
1105 __ bind(deferred->exit());
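// The operands were Smi-tagged above (deoptimizing if they do not fit); the
// deferred code calls the generic BinaryOpStub for Token::DIV and execution
// resumes here with a tagged result. A non-Smi result means the quotient is
// not a small integer, so we deoptimize below.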
1106
1107 // If the result in r0 is a Smi, untag it, else deoptimize.
1108 __ JumpIfNotSmi(result, &deoptimize);
1109 __ SmiUntag(result);
1110 __ b(&done);
1111
1112 __ bind(&deoptimize);
1113 DeoptimizeIf(al, instr->environment());
1114 __ bind(&done);
1115 }
1116
1117
1118 template<int T>
1119 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1120 Token::Value op) {
1121 Register left = ToRegister(instr->InputAt(0));
1122 Register right = ToRegister(instr->InputAt(1));
1123
1124 PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1125 // Move left to r1 and right to r0 for the stub call.
1126 if (left.is(r1)) {
1127 __ Move(r0, right);
1128 } else if (left.is(r0) && right.is(r1)) {
1129 __ Swap(r0, r1, r2);
1130 } else if (left.is(r0)) {
1131 ASSERT(!right.is(r1));
1132 __ mov(r1, r0);
1133 __ mov(r0, right);
1134 } else {
1135 ASSERT(!left.is(r0) && !right.is(r0));
1136 __ mov(r0, right);
1137 __ mov(r1, left);
1138 }
1139 BinaryOpStub stub(op, OVERWRITE_LEFT);
1140 __ CallStub(&stub);
1141 RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
1142 0,
1143 Safepoint::kNoLazyDeopt);
1144 // Overwrite the stored value of r0 with the result of the stub.
1145 __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
1146 }
1147
1148
1149 void LCodeGen::DoMulI(LMulI* instr) {
1150 Register scratch = scratch0();
1151 Register result = ToRegister(instr->result());
1152 // Note that result may alias left.
1153 Register left = ToRegister(instr->InputAt(0));
1154 LOperand* right_op = instr->InputAt(1);
1155
1156 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1157 bool bailout_on_minus_zero =
1158 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1159
1160 if (right_op->IsConstantOperand() && !can_overflow) {
1161 // Use optimized code for specific constants.
1162 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1163
1164 if (bailout_on_minus_zero && (constant < 0)) {
1165 // The case of a zero constant is handled separately below.
1166 // If the constant is negative and left is zero, the result should be -0.
1167 __ cmp(left, Operand(0));
1168 DeoptimizeIf(eq, instr->environment());
1169 }
1170
1171 switch (constant) {
1172 case -1:
1173 __ rsb(result, left, Operand(0));
1174 break;
1175 case 0:
1176 if (bailout_on_minus_zero) {
1177 // If left is strictly negative and the constant is zero, the
1178 // result is -0. Deoptimize if required, otherwise return 0.
1179 __ cmp(left, Operand(0));
1180 DeoptimizeIf(mi, instr->environment());
1181 }
1182 __ mov(result, Operand(0));
1183 break;
1184 case 1:
1185 __ Move(result, left);
1186 break;
1187 default:
1188 // Multiplying by powers of two and powers of two plus or minus
1189 // one can be done faster with shifted operands.
1190 // For other constants we emit standard code.
1191 int32_t mask = constant >> 31;
1192 uint32_t constant_abs = (constant + mask) ^ mask;
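// The two lines above compute |constant| without a branch: mask is 0 for
// non-negative constants and -1 for negative ones, and (x + mask) ^ mask
// negates x exactly when mask is -1 (e.g. constant = -6: mask = -1,
// (-6 + -1) ^ -1 = 6).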
1193
1194 if (IsPowerOf2(constant_abs) ||
1195 IsPowerOf2(constant_abs - 1) ||
1196 IsPowerOf2(constant_abs + 1)) {
1197 if (IsPowerOf2(constant_abs)) {
1198 int32_t shift = WhichPowerOf2(constant_abs);
1199 __ mov(result, Operand(left, LSL, shift));
1200 } else if (IsPowerOf2(constant_abs - 1)) {
1201 int32_t shift = WhichPowerOf2(constant_abs - 1);
1202 __ add(result, left, Operand(left, LSL, shift));
1203 } else if (IsPowerOf2(constant_abs + 1)) {
1204 int32_t shift = WhichPowerOf2(constant_abs + 1);
1205 __ rsb(result, left, Operand(left, LSL, shift));
1206 }
1207
1208 // Correct the sign of the result if the constant is negative.
1209 if (constant < 0) __ rsb(result, result, Operand(0));
1210
1211 } else {
1212 // Generate standard code.
1213 __ mov(ip, Operand(constant));
1214 __ mul(result, left, ip);
1215 }
1216 }
1217
1218 } else {
1219 Register right = EmitLoadRegister(right_op, scratch);
1220 if (bailout_on_minus_zero) {
1221 __ orr(ToRegister(instr->TempAt(0)), left, right);
1222 }
1223
1224 if (can_overflow) {
1225 // scratch:result = left * right.
1226 __ smull(result, scratch, left, right);
1227 __ cmp(scratch, Operand(result, ASR, 31));
1228 DeoptimizeIf(ne, instr->environment());
1229 } else {
1230 __ mul(result, left, right);
1231 }
1232
1233 if (bailout_on_minus_zero) {
1234 // Bail out if the result is supposed to be negative zero.
1235 Label done;
1236 __ cmp(result, Operand(0));
1237 __ b(ne, &done);
1238 __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
1239 DeoptimizeIf(mi, instr->environment());
1240 __ bind(&done);
1241 }
1242 }
1243 }
1244
1245
1246 void LCodeGen::DoBitI(LBitI* instr) {
1247 LOperand* left_op = instr->InputAt(0);
1248 LOperand* right_op = instr->InputAt(1);
1249 ASSERT(left_op->IsRegister());
1250 Register left = ToRegister(left_op);
1251 Register result = ToRegister(instr->result());
1252 Operand right(no_reg);
1253
1254 if (right_op->IsStackSlot() || right_op->IsArgument()) {
1255 right = Operand(EmitLoadRegister(right_op, ip));
1256 } else {
1257 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1258 right = ToOperand(right_op);
1259 }
1260
1261 switch (instr->op()) {
1262 case Token::BIT_AND:
1263 __ and_(result, left, right);
1264 break;
1265 case Token::BIT_OR:
1266 __ orr(result, left, right);
1267 break;
1268 case Token::BIT_XOR:
1269 __ eor(result, left, right);
1270 break;
1271 default:
1272 UNREACHABLE();
1273 break;
1274 }
1275 }
1276
1277
1278 void LCodeGen::DoShiftI(LShiftI* instr) {
1279 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1280 // result may alias either of them.
1281 LOperand* right_op = instr->InputAt(1);
1282 Register left = ToRegister(instr->InputAt(0));
1283 Register result = ToRegister(instr->result());
1284 Register scratch = scratch0();
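// Shift amounts are masked to the low five bits, matching JavaScript shift
// semantics. For SHR the untagged result is treated as a signed 32-bit
// integer, so when the instruction can deoptimize a result with the sign bit
// set triggers a deopt instead of producing a negative value.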
1285 if (right_op->IsRegister()) {
1286 // Mask the right_op operand.
1287 __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1288 switch (instr->op()) {
1289 case Token::SAR:
1290 __ mov(result, Operand(left, ASR, scratch));
1291 break;
1292 case Token::SHR:
1293 if (instr->can_deopt()) {
1294 __ mov(result, Operand(left, LSR, scratch), SetCC);
1295 DeoptimizeIf(mi, instr->environment());
1296 } else {
1297 __ mov(result, Operand(left, LSR, scratch));
1298 }
1299 break;
1300 case Token::SHL:
1301 __ mov(result, Operand(left, LSL, scratch));
1302 break;
1303 default:
1304 UNREACHABLE();
1305 break;
1306 }
1307 } else {
1308 // Mask the right_op operand.
1309 int value = ToInteger32(LConstantOperand::cast(right_op));
1310 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1311 switch (instr->op()) {
1312 case Token::SAR:
1313 if (shift_count != 0) {
1314 __ mov(result, Operand(left, ASR, shift_count));
1315 } else {
1316 __ Move(result, left);
1317 }
1318 break;
1319 case Token::SHR:
1320 if (shift_count != 0) {
1321 __ mov(result, Operand(left, LSR, shift_count));
1322 } else {
1323 if (instr->can_deopt()) {
1324 __ tst(left, Operand(0x80000000));
1325 DeoptimizeIf(ne, instr->environment());
1326 }
1327 __ Move(result, left);
1328 }
1329 break;
1330 case Token::SHL:
1331 if (shift_count != 0) {
1332 __ mov(result, Operand(left, LSL, shift_count));
1333 } else {
1334 __ Move(result, left);
1335 }
1336 break;
1337 default:
1338 UNREACHABLE();
1339 break;
1340 }
1341 }
1342 }
1343
1344
1345 void LCodeGen::DoSubI(LSubI* instr) {
1346 LOperand* left = instr->InputAt(0);
1347 LOperand* right = instr->InputAt(1);
1348 LOperand* result = instr->result();
1349 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1350 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1351
1352 if (right->IsStackSlot() || right->IsArgument()) {
1353 Register right_reg = EmitLoadRegister(right, ip);
1354 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1355 } else {
1356 ASSERT(right->IsRegister() || right->IsConstantOperand());
1357 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1358 }
1359
1360 if (can_overflow) {
1361 DeoptimizeIf(vs, instr->environment());
1362 }
1363 }
1364
1365
1366 void LCodeGen::DoConstantI(LConstantI* instr) {
1367 ASSERT(instr->result()->IsRegister());
1368 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1369 }
1370
1371
1372 void LCodeGen::DoConstantD(LConstantD* instr) {
1373 ASSERT(instr->result()->IsDoubleRegister());
1374 DwVfpRegister result = ToDoubleRegister(instr->result());
1375 double v = instr->value();
1376 __ Vmov(result, v);
1377 }
1378
1379
1380 void LCodeGen::DoConstantT(LConstantT* instr) {
1381 Handle<Object> value = instr->value();
1382 if (value->IsSmi()) {
1383 __ mov(ToRegister(instr->result()), Operand(value));
1384 } else {
1385 __ LoadHeapObject(ToRegister(instr->result()),
1386 Handle<HeapObject>::cast(value));
1387 }
1388 }
1389
1390
1391 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1392 Register result = ToRegister(instr->result());
1393 Register array = ToRegister(instr->InputAt(0));
1394 __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1395 }
1396
1397
1398 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1399 Register result = ToRegister(instr->result());
1400 Register array = ToRegister(instr->InputAt(0));
1401 __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1402 }
1403
1404
1405 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1406 Register result = ToRegister(instr->result());
1407 Register input = ToRegister(instr->InputAt(0));
1408
1409 // Load map into |result|.
1410 __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
1411 // Load the map's "bit field 2" into |result|. We only need the first byte,
1412 // but the following bit field extraction takes care of that anyway.
1413 __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
1414 // Retrieve elements_kind from bit field 2.
1415 __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1416 }
1417
1418
1419 void LCodeGen::DoValueOf(LValueOf* instr) {
1420 Register input = ToRegister(instr->InputAt(0));
1421 Register result = ToRegister(instr->result());
1422 Register map = ToRegister(instr->TempAt(0));
1423 Label done;
1424
1425 // If the object is a smi return the object.
1426 __ tst(input, Operand(kSmiTagMask));
1427 __ Move(result, input, eq);
1428 __ b(eq, &done);
1429
1430 // If the object is not a value type, return the object.
1431 __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
1432 __ Move(result, input, ne);
1433 __ b(ne, &done);
1434 __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
1435
1436 __ bind(&done);
1437 }
1438
1439
1440 void LCodeGen::DoDateField(LDateField* instr) {
1441 Register object = ToRegister(instr->InputAt(0));
1442 Register result = ToRegister(instr->result());
1443 Register scratch = ToRegister(instr->TempAt(0));
1444 Smi* index = instr->index();
1445 Label runtime, done;
1446 ASSERT(object.is(result));
1447 ASSERT(object.is(r0));
1448 ASSERT(!scratch.is(scratch0()));
1449 ASSERT(!scratch.is(object));
1450
1451 #ifdef DEBUG
1452 __ AbortIfSmi(object);
1453 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1454 __ Assert(eq, "Trying to get date field from non-date.");
1455 #endif
1456
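// Field 0 (the time value) is always read directly from the object. Other
// cacheable fields are read from the object's cached slots when its cache
// stamp matches the isolate's current date cache stamp; otherwise we fall
// back to the C function, which takes the date object in r0 and the field
// index in r1.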
1457 if (index->value() == 0) {
1458 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1459 } else {
1460 if (index->value() < JSDate::kFirstUncachedField) {
1461 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1462 __ mov(scratch, Operand(stamp));
1463 __ ldr(scratch, MemOperand(scratch));
1464 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1465 __ cmp(scratch, scratch0());
1466 __ b(ne, &runtime);
1467 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1468 kPointerSize * index->value()));
1469 __ jmp(&done);
1470 }
1471 __ bind(&runtime);
1472 __ PrepareCallCFunction(2, scratch);
1473 __ mov(r1, Operand(index));
1474 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1475 __ bind(&done);
1476 }
1477 }
1478
1479
1480 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1481 Register input = ToRegister(instr->InputAt(0));
1482 Register result = ToRegister(instr->result());
1483 __ mvn(result, Operand(input));
1484 }
1485
1486
1487 void LCodeGen::DoThrow(LThrow* instr) {
1488 Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1489 __ push(input_reg);
1490 CallRuntime(Runtime::kThrow, 1, instr);
1491
1492 if (FLAG_debug_code) {
1493 __ stop("Unreachable code.");
1494 }
1495 }
1496
1497
1498 void LCodeGen::DoAddI(LAddI* instr) {
1499 LOperand* left = instr->InputAt(0);
1500 LOperand* right = instr->InputAt(1);
1501 LOperand* result = instr->result();
1502 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1503 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1504
1505 if (right->IsStackSlot() || right->IsArgument()) {
1506 Register right_reg = EmitLoadRegister(right, ip);
1507 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1508 } else {
1509 ASSERT(right->IsRegister() || right->IsConstantOperand());
1510 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1511 }
1512
1513 if (can_overflow) {
1514 DeoptimizeIf(vs, instr->environment());
1515 }
1516 }
1517
1518
1519 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1520 DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1521 DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1522 DoubleRegister result = ToDoubleRegister(instr->result());
1523 switch (instr->op()) {
1524 case Token::ADD:
1525 __ vadd(result, left, right);
1526 break;
1527 case Token::SUB:
1528 __ vsub(result, left, right);
1529 break;
1530 case Token::MUL:
1531 __ vmul(result, left, right);
1532 break;
1533 case Token::DIV:
1534 __ vdiv(result, left, right);
1535 break;
1536 case Token::MOD: {
1537 // Save r0-r3 on the stack.
1538 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1539
1540 __ PrepareCallCFunction(0, 2, scratch0());
1541 __ SetCallCDoubleArguments(left, right);
1542 __ CallCFunction(
1543 ExternalReference::double_fp_operation(Token::MOD, isolate()),
1544 0, 2);
1545 // Move the result in the double result register.
1546 __ GetCFunctionDoubleResult(result);
1547
1548 // Restore r0-r3.
1549 __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1550 break;
1551 }
1552 default:
1553 UNREACHABLE();
1554 break;
1555 }
1556 }
1557
1558
1559 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1560 ASSERT(ToRegister(instr->InputAt(0)).is(r1));
1561 ASSERT(ToRegister(instr->InputAt(1)).is(r0));
1562 ASSERT(ToRegister(instr->result()).is(r0));
1563
1564 BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1565 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1566 __ nop(); // Signals no inlined code.
1567 }
1568
1569
1570 int LCodeGen::GetNextEmittedBlock(int block) {
1571 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1572 LLabel* label = chunk_->GetLabel(i);
1573 if (!label->HasReplacement()) return i;
1574 }
1575 return -1;
1576 }
1577
1578
1579 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1580 int next_block = GetNextEmittedBlock(current_block_);
1581 right_block = chunk_->LookupDestination(right_block);
1582 left_block = chunk_->LookupDestination(left_block);
1583
1584 if (right_block == left_block) {
1585 EmitGoto(left_block);
1586 } else if (left_block == next_block) {
1587 __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1588 } else if (right_block == next_block) {
1589 __ b(cc, chunk_->GetAssemblyLabel(left_block));
1590 } else {
1591 __ b(cc, chunk_->GetAssemblyLabel(left_block));
1592 __ b(chunk_->GetAssemblyLabel(right_block));
1593 }
1594 }
1595
1596
1597 void LCodeGen::DoBranch(LBranch* instr) {
1598 int true_block = chunk_->LookupDestination(instr->true_block_id());
1599 int false_block = chunk_->LookupDestination(instr->false_block_id());
1600
1601 Representation r = instr->hydrogen()->value()->representation();
1602 if (r.IsInteger32()) {
1603 Register reg = ToRegister(instr->InputAt(0));
1604 __ cmp(reg, Operand(0));
1605 EmitBranch(true_block, false_block, ne);
1606 } else if (r.IsDouble()) {
1607 DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1608 Register scratch = scratch0();
1609
1610 // Test the double value. Zero and NaN are false.
1611 __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1612 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1613 EmitBranch(true_block, false_block, eq);
1614 } else {
1615 ASSERT(r.IsTagged());
1616 Register reg = ToRegister(instr->InputAt(0));
1617 HType type = instr->hydrogen()->value()->type();
1618 if (type.IsBoolean()) {
1619 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1620 EmitBranch(true_block, false_block, eq);
1621 } else if (type.IsSmi()) {
1622 __ cmp(reg, Operand(0));
1623 EmitBranch(true_block, false_block, ne);
1624 } else {
1625 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1626 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1627
1628 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1629 // Avoid deopts in the case where we've never executed this path before.
1630 if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1631
1632 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1633 // undefined -> false.
1634 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1635 __ b(eq, false_label);
1636 }
1637 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1638 // Boolean -> its value.
1639 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1640 __ b(eq, true_label);
1641 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1642 __ b(eq, false_label);
1643 }
1644 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1645 // 'null' -> false.
1646 __ CompareRoot(reg, Heap::kNullValueRootIndex);
1647 __ b(eq, false_label);
1648 }
1649
1650 if (expected.Contains(ToBooleanStub::SMI)) {
1651       // Smis: 0 -> false, all others -> true.
1652 __ cmp(reg, Operand(0));
1653 __ b(eq, false_label);
1654 __ JumpIfSmi(reg, true_label);
1655 } else if (expected.NeedsMap()) {
1656 // If we need a map later and have a Smi -> deopt.
1657 __ tst(reg, Operand(kSmiTagMask));
1658 DeoptimizeIf(eq, instr->environment());
1659 }
1660
1661 const Register map = scratch0();
1662 if (expected.NeedsMap()) {
1663 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1664
1665 if (expected.CanBeUndetectable()) {
1666 // Undetectable -> false.
1667 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1668 __ tst(ip, Operand(1 << Map::kIsUndetectable));
1669 __ b(ne, false_label);
1670 }
1671 }
1672
1673 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1674 // spec object -> true.
1675 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1676 __ b(ge, true_label);
1677 }
1678
1679 if (expected.Contains(ToBooleanStub::STRING)) {
1680 // String value -> false iff empty.
1681 Label not_string;
1682 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1683         __ b(ge, &not_string);
1684 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1685 __ cmp(ip, Operand(0));
1686 __ b(ne, true_label);
1687 __ b(false_label);
1688         __ bind(&not_string);
1689 }
1690
1691 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1692 // heap number -> false iff +0, -0, or NaN.
1693 DoubleRegister dbl_scratch = double_scratch0();
1694 Label not_heap_number;
1695 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1696         __ b(ne, &not_heap_number);
1697 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1698 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
1699 __ b(vs, false_label); // NaN -> false.
1700 __ b(eq, false_label); // +0, -0 -> false.
1701 __ b(true_label);
1702         __ bind(&not_heap_number);
1703 }
1704
1705 // We've seen something for the first time -> deopt.
1706 DeoptimizeIf(al, instr->environment());
1707 }
1708 }
1709 }
1710
1711
1712 void LCodeGen::EmitGoto(int block) {
1713 block = chunk_->LookupDestination(block);
1714 int next_block = GetNextEmittedBlock(current_block_);
1715 if (block != next_block) {
1716 __ jmp(chunk_->GetAssemblyLabel(block));
1717 }
1718 }
1719
1720
1721 void LCodeGen::DoGoto(LGoto* instr) {
1722 EmitGoto(instr->block_id());
1723 }
1724
1725
1726 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1727 Condition cond = kNoCondition;
1728 switch (op) {
1729 case Token::EQ:
1730 case Token::EQ_STRICT:
1731 cond = eq;
1732 break;
1733 case Token::LT:
1734 cond = is_unsigned ? lo : lt;
1735 break;
1736 case Token::GT:
1737 cond = is_unsigned ? hi : gt;
1738 break;
1739 case Token::LTE:
1740 cond = is_unsigned ? ls : le;
1741 break;
1742 case Token::GTE:
1743 cond = is_unsigned ? hs : ge;
1744 break;
1745 case Token::IN:
1746 case Token::INSTANCEOF:
1747 default:
1748 UNREACHABLE();
1749 }
1750 return cond;
1751 }
1752
1753
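// Compare-and-branch on two integer or double values. If both operands are
// constants the comparison is evaluated at compile time and a plain goto is
// emitted. When only the left operand is a constant, the operands are swapped
// so the constant can be encoded as an Operand, and the condition is reversed
// to match.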
1754 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1755 LOperand* left = instr->InputAt(0);
1756 LOperand* right = instr->InputAt(1);
1757 int false_block = chunk_->LookupDestination(instr->false_block_id());
1758 int true_block = chunk_->LookupDestination(instr->true_block_id());
1759 Condition cond = TokenToCondition(instr->op(), false);
1760
1761 if (left->IsConstantOperand() && right->IsConstantOperand()) {
1762 // We can statically evaluate the comparison.
1763 double left_val = ToDouble(LConstantOperand::cast(left));
1764 double right_val = ToDouble(LConstantOperand::cast(right));
1765 int next_block =
1766 EvalComparison(instr->op(), left_val, right_val) ? true_block
1767 : false_block;
1768 EmitGoto(next_block);
1769 } else {
1770 if (instr->is_double()) {
1771 // Compare left and right operands as doubles and load the
1772 // resulting flags into the normal status register.
1773 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
1774 // If a NaN is involved, i.e. the result is unordered (V set),
1775 // jump to false block label.
1776 __ b(vs, chunk_->GetAssemblyLabel(false_block));
1777 } else {
1778 if (right->IsConstantOperand()) {
1779 __ cmp(ToRegister(left),
1780 Operand(ToInteger32(LConstantOperand::cast(right))));
1781 } else if (left->IsConstantOperand()) {
1782 __ cmp(ToRegister(right),
1783 Operand(ToInteger32(LConstantOperand::cast(left))));
1784 // We transposed the operands. Reverse the condition.
1785 cond = ReverseCondition(cond);
1786 } else {
1787 __ cmp(ToRegister(left), ToRegister(right));
1788 }
1789 }
1790 EmitBranch(true_block, false_block, cond);
1791 }
1792 }
1793
1794
1795 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1796 Register left = ToRegister(instr->InputAt(0));
1797 Register right = ToRegister(instr->InputAt(1));
1798 int false_block = chunk_->LookupDestination(instr->false_block_id());
1799 int true_block = chunk_->LookupDestination(instr->true_block_id());
1800
1801 __ cmp(left, Operand(right));
1802 EmitBranch(true_block, false_block, eq);
1803 }
1804
1805
1806 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1807 Register left = ToRegister(instr->InputAt(0));
1808 int true_block = chunk_->LookupDestination(instr->true_block_id());
1809 int false_block = chunk_->LookupDestination(instr->false_block_id());
1810
1811 __ cmp(left, Operand(instr->hydrogen()->right()));
1812 EmitBranch(true_block, false_block, eq);
1813 }
1814
1815
1816 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1817 Register scratch = scratch0();
1818 Register reg = ToRegister(instr->InputAt(0));
1819 int false_block = chunk_->LookupDestination(instr->false_block_id());
1820
1821 // If the expression is known to be untagged or a smi, then it's definitely
1822   // not null, and it can't be an undetectable object.
1823 if (instr->hydrogen()->representation().IsSpecialization() ||
1824 instr->hydrogen()->type().IsSmi()) {
1825 EmitGoto(false_block);
1826 return;
1827 }
1828
1829 int true_block = chunk_->LookupDestination(instr->true_block_id());
1830 Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1831 Heap::kNullValueRootIndex :
1832 Heap::kUndefinedValueRootIndex;
1833 __ LoadRoot(ip, nil_value);
1834 __ cmp(reg, ip);
1835 if (instr->kind() == kStrictEquality) {
1836 EmitBranch(true_block, false_block, eq);
1837 } else {
1838 Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1839 Heap::kUndefinedValueRootIndex :
1840 Heap::kNullValueRootIndex;
1841 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1842 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1843 __ b(eq, true_label);
1844 __ LoadRoot(ip, other_nil_value);
1845 __ cmp(reg, ip);
1846 __ b(eq, true_label);
1847 __ JumpIfSmi(reg, false_label);
1848 // Check for undetectable objects by looking in the bit field in
1849 // the map. The object has already been smi checked.
1850 __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1851 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1852 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
1853 EmitBranch(true_block, false_block, ne);
1854 }
1855 }
1856
1857
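// Tests whether |input| is an object in the sense of LIsObjectAndBranch:
// null counts as an object, smis and undetectable objects do not, and
// otherwise the instance type must lie in the non-callable spec-object range.
// Branches to the given labels or falls through with the flags set so that
// the returned condition holds in the object case.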
1858 Condition LCodeGen::EmitIsObject(Register input,
1859 Register temp1,
1860 Label* is_not_object,
1861 Label* is_object) {
1862 Register temp2 = scratch0();
1863 __ JumpIfSmi(input, is_not_object);
1864
1865 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1866 __ cmp(input, temp2);
1867 __ b(eq, is_object);
1868
1869 // Load map.
1870 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1871 // Undetectable objects behave like undefined.
1872 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1873 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
1874 __ b(ne, is_not_object);
1875
1876 // Load instance type and check that it is in object type range.
1877 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
1878 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1879 __ b(lt, is_not_object);
1880 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1881 return le;
1882 }
1883
1884
1885 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1886 Register reg = ToRegister(instr->InputAt(0));
1887 Register temp1 = ToRegister(instr->TempAt(0));
1888
1889 int true_block = chunk_->LookupDestination(instr->true_block_id());
1890 int false_block = chunk_->LookupDestination(instr->false_block_id());
1891 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1892 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1893
1894 Condition true_cond =
1895 EmitIsObject(reg, temp1, false_label, true_label);
1896
1897 EmitBranch(true_block, false_block, true_cond);
1898 }
1899
1900
1901 Condition LCodeGen::EmitIsString(Register input,
1902 Register temp1,
1903 Label* is_not_string) {
1904 __ JumpIfSmi(input, is_not_string);
1905 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
1906
1907 return lt;
1908 }
1909
1910
1911 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1912 Register reg = ToRegister(instr->InputAt(0));
1913 Register temp1 = ToRegister(instr->TempAt(0));
1914
1915 int true_block = chunk_->LookupDestination(instr->true_block_id());
1916 int false_block = chunk_->LookupDestination(instr->false_block_id());
1917 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1918
1919 Condition true_cond =
1920 EmitIsString(reg, temp1, false_label);
1921
1922 EmitBranch(true_block, false_block, true_cond);
1923 }
1924
1925
1926 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1927 int true_block = chunk_->LookupDestination(instr->true_block_id());
1928 int false_block = chunk_->LookupDestination(instr->false_block_id());
1929
1930 Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1931 __ tst(input_reg, Operand(kSmiTagMask));
1932 EmitBranch(true_block, false_block, eq);
1933 }
1934
1935
1936 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1937 Register input = ToRegister(instr->InputAt(0));
1938 Register temp = ToRegister(instr->TempAt(0));
1939
1940 int true_block = chunk_->LookupDestination(instr->true_block_id());
1941 int false_block = chunk_->LookupDestination(instr->false_block_id());
1942
1943 __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1944 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
1945 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1946 __ tst(temp, Operand(1 << Map::kIsUndetectable));
1947 EmitBranch(true_block, false_block, ne);
1948 }
1949
1950
1951 static Condition ComputeCompareCondition(Token::Value op) {
1952 switch (op) {
1953 case Token::EQ_STRICT:
1954 case Token::EQ:
1955 return eq;
1956 case Token::LT:
1957 return lt;
1958 case Token::GT:
1959 return gt;
1960 case Token::LTE:
1961 return le;
1962 case Token::GTE:
1963 return ge;
1964 default:
1965 UNREACHABLE();
1966 return kNoCondition;
1967 }
1968 }
1969
1970
1971 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1972 Token::Value op = instr->op();
1973 int true_block = chunk_->LookupDestination(instr->true_block_id());
1974 int false_block = chunk_->LookupDestination(instr->false_block_id());
1975
1976 Handle<Code> ic = CompareIC::GetUninitialized(op);
1977 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1978 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
1979
1980 Condition condition = ComputeCompareCondition(op);
1981
1982 EmitBranch(true_block, false_block, condition);
1983 }
1984
1985
1986 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1987 InstanceType from = instr->from();
1988 InstanceType to = instr->to();
1989 if (from == FIRST_TYPE) return to;
1990 ASSERT(from == to || to == LAST_TYPE);
1991 return from;
1992 }
1993
1994
1995 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1996 InstanceType from = instr->from();
1997 InstanceType to = instr->to();
1998 if (from == to) return eq;
1999 if (to == LAST_TYPE) return hs;
2000 if (from == FIRST_TYPE) return ls;
2001 UNREACHABLE();
2002 return eq;
2003 }
2004
2005
2006 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2007 Register scratch = scratch0();
2008 Register input = ToRegister(instr->InputAt(0));
2009
2010 int true_block = chunk_->LookupDestination(instr->true_block_id());
2011 int false_block = chunk_->LookupDestination(instr->false_block_id());
2012
2013 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2014
2015 __ JumpIfSmi(input, false_label);
2016
2017 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2018 EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
2019 }
2020
2021
2022 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2023 Register input = ToRegister(instr->InputAt(0));
2024 Register result = ToRegister(instr->result());
2025
2026 if (FLAG_debug_code) {
2027 __ AbortIfNotString(input);
2028 }
2029
2030 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2031 __ IndexFromHash(result, result);
2032 }
2033
2034
2035 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2036 LHasCachedArrayIndexAndBranch* instr) {
2037 Register input = ToRegister(instr->InputAt(0));
2038 Register scratch = scratch0();
2039
2040 int true_block = chunk_->LookupDestination(instr->true_block_id());
2041 int false_block = chunk_->LookupDestination(instr->false_block_id());
2042
2043 __ ldr(scratch,
2044 FieldMemOperand(input, String::kHashFieldOffset));
2045 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2046 EmitBranch(true_block, false_block, eq);
2047 }
2048
2049
2050 // Branches to a label or falls through with the answer in flags. Trashes
2051 // the temp registers, but not the input.
2052 void LCodeGen::EmitClassOfTest(Label* is_true,
2053 Label* is_false,
2054                                Handle<String> class_name,
2055 Register input,
2056 Register temp,
2057 Register temp2) {
2058 ASSERT(!input.is(temp));
2059 ASSERT(!input.is(temp2));
2060 ASSERT(!temp.is(temp2));
2061
2062 __ JumpIfSmi(input, is_false);
2063
2064 if (class_name->IsEqualTo(CStrVector("Function"))) {
2065 // Assuming the following assertions, we can use the same compares to test
2066 // for both being a function type and being in the object type range.
2067 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2068 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2069 FIRST_SPEC_OBJECT_TYPE + 1);
2070 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2071 LAST_SPEC_OBJECT_TYPE - 1);
2072 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2073 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2074 __ b(lt, is_false);
2075 __ b(eq, is_true);
2076 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2077 __ b(eq, is_true);
2078 } else {
2079 // Faster code path to avoid two compares: subtract lower bound from the
2080 // actual type and do a signed compare with the width of the type range.
2081 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2082 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2083 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2084 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2085 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2086 __ b(gt, is_false);
2087 }
2088
2089 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2090 // Check if the constructor in the map is a function.
2091 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2092
2093 // Objects with a non-function constructor have class 'Object'.
2094 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2095 if (class_name->IsEqualTo(CStrVector("Object"))) {
2096 __ b(ne, is_true);
2097 } else {
2098 __ b(ne, is_false);
2099 }
2100
2101 // temp now contains the constructor function. Grab the
2102 // instance class name from there.
2103 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2104 __ ldr(temp, FieldMemOperand(temp,
2105 SharedFunctionInfo::kInstanceClassNameOffset));
2106 // The class name we are testing against is a symbol because it's a literal.
2107 // The name in the constructor is a symbol because of the way the context is
2108 // booted. This routine isn't expected to work for random API-created
2109 // classes and it doesn't have to because you can't access it with natives
2110 // syntax. Since both sides are symbols it is sufficient to use an identity
2111 // comparison.
2112 __ cmp(temp, Operand(class_name));
2113 // End with the answer in flags.
2114 }
2115
2116
2117 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2118 Register input = ToRegister(instr->InputAt(0));
2119 Register temp = scratch0();
2120 Register temp2 = ToRegister(instr->TempAt(0));
2121 Handle<String> class_name = instr->hydrogen()->class_name();
2122
2123 int true_block = chunk_->LookupDestination(instr->true_block_id());
2124 int false_block = chunk_->LookupDestination(instr->false_block_id());
2125
2126 Label* true_label = chunk_->GetAssemblyLabel(true_block);
2127 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2128
2129 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2130
2131 EmitBranch(true_block, false_block, eq);
2132 }
2133
2134
2135 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2136 Register reg = ToRegister(instr->InputAt(0));
2137 Register temp = ToRegister(instr->TempAt(0));
2138 int true_block = instr->true_block_id();
2139 int false_block = instr->false_block_id();
2140
2141 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2142 __ cmp(temp, Operand(instr->map()));
2143 EmitBranch(true_block, false_block, eq);
2144 }
2145
2146
2147 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2148 ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
2149 ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
2150
2151 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2152 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2153
2154 __ cmp(r0, Operand(0));
2155 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2156 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2157 }
2158
2159
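// instanceof against a global function known at compile time. The fast path
// is an inline cache: the InstanceofStub patches the checked map and the
// boolean answer directly into this code, so repeated checks against the same
// map need only a load and a compare. Smis, null and strings are handled
// inline as false; anything else falls through to the deferred stub call.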
2160 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2161 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2162 public:
2163 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2164 LInstanceOfKnownGlobal* instr)
2165 : LDeferredCode(codegen), instr_(instr) { }
2166 virtual void Generate() {
2167 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2168 }
2169 virtual LInstruction* instr() { return instr_; }
2170 Label* map_check() { return &map_check_; }
2171 private:
2172 LInstanceOfKnownGlobal* instr_;
2173 Label map_check_;
2174 };
2175
2176 DeferredInstanceOfKnownGlobal* deferred;
2177 deferred = new DeferredInstanceOfKnownGlobal(this, instr);
2178
2179 Label done, false_result;
2180 Register object = ToRegister(instr->InputAt(0));
2181 Register temp = ToRegister(instr->TempAt(0));
2182 Register result = ToRegister(instr->result());
2183
2184 ASSERT(object.is(r0));
2185 ASSERT(result.is(r0));
2186
2187 // A Smi is not instance of anything.
2188 __ JumpIfSmi(object, &false_result);
2189
2190   // This is the inlined call site instanceof cache. The two occurrences of the
2191 // hole value will be patched to the last map/result pair generated by the
2192 // instanceof stub.
2193 Label cache_miss;
2194 Register map = temp;
2195 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2196 __ bind(deferred->map_check()); // Label for calculating code patching.
2197 // We use Factory::the_hole_value() on purpose instead of loading from the
2198 // root array to force relocation to be able to later patch with
2199 // the cached map.
2200 Handle<JSGlobalPropertyCell> cell =
2201 factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2202 __ mov(ip, Operand(Handle<Object>(cell)));
2203 __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2204 __ cmp(map, Operand(ip));
2205 __ b(ne, &cache_miss);
2206 // We use Factory::the_hole_value() on purpose instead of loading from the
2207 // root array to force relocation to be able to later patch
2208 // with true or false.
2209 __ mov(result, Operand(factory()->the_hole_value()));
2210 __ b(&done);
2211
2212 // The inlined call site cache did not match. Check null and string before
2213 // calling the deferred code.
2214 __ bind(&cache_miss);
2215 // Null is not instance of anything.
2216 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2217 __ cmp(object, Operand(ip));
2218 __ b(eq, &false_result);
2219
2220   // String values are not instances of anything.
2221 Condition is_string = masm_->IsObjectStringType(object, temp);
2222 __ b(is_string, &false_result);
2223
2224 // Go to the deferred code.
2225 __ b(deferred->entry());
2226
2227 __ bind(&false_result);
2228 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2229
2230   // At this point result holds either the true or the false value. The
2231   // deferred code also produces a true or false object.
2232 __ bind(deferred->exit());
2233 __ bind(&done);
2234 }
2235
2236
2237 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2238 Label* map_check) {
2239 Register result = ToRegister(instr->result());
2240 ASSERT(result.is(r0));
2241
2242 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2243 flags = static_cast<InstanceofStub::Flags>(
2244 flags | InstanceofStub::kArgsInRegisters);
2245 flags = static_cast<InstanceofStub::Flags>(
2246 flags | InstanceofStub::kCallSiteInlineCheck);
2247 flags = static_cast<InstanceofStub::Flags>(
2248 flags | InstanceofStub::kReturnTrueFalseObject);
2249 InstanceofStub stub(flags);
2250
2251 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2252
2253 // Get the temp register reserved by the instruction. This needs to be r4 as
2254   // its slot in the pushed safepoint register block is used to communicate the
2255 // offset to the location of the map check.
2256 Register temp = ToRegister(instr->TempAt(0));
2257 ASSERT(temp.is(r4));
2258 __ LoadHeapObject(InstanceofStub::right(), instr->function());
2259 static const int kAdditionalDelta = 4;
2260 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2261 Label before_push_delta;
2262 __ bind(&before_push_delta);
2263 __ BlockConstPoolFor(kAdditionalDelta);
2264 __ mov(temp, Operand(delta * kPointerSize));
2265 __ StoreToSafepointRegisterSlot(temp, temp);
2266 CallCodeGeneric(stub.GetCode(),
2267 RelocInfo::CODE_TARGET,
2268 instr,
2269 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2270 ASSERT(instr->HasDeoptimizationEnvironment());
2271 LEnvironment* env = instr->deoptimization_environment();
2272 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2273 // Put the result value into the result register slot and
2274 // restore all registers.
2275 __ StoreToSafepointRegisterSlot(result, result);
2276 }
2277
2278
2279 void LCodeGen::DoCmpT(LCmpT* instr) {
2280 Token::Value op = instr->op();
2281
2282 Handle<Code> ic = CompareIC::GetUninitialized(op);
2283 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2284 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2285
2286 Condition condition = ComputeCompareCondition(op);
2287 __ LoadRoot(ToRegister(instr->result()),
2288 Heap::kTrueValueRootIndex,
2289 condition);
2290 __ LoadRoot(ToRegister(instr->result()),
2291 Heap::kFalseValueRootIndex,
2292 NegateCondition(condition));
2293 }
2294
2295
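// Returns from the JS function: the frame is torn down and the caller's
// arguments are removed. The "+ 1" in the stack delta accounts for the
// receiver, which is popped along with the declared parameters.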
2296 void LCodeGen::DoReturn(LReturn* instr) {
2297 if (FLAG_trace) {
2298 // Push the return value on the stack as the parameter.
2299 // Runtime::TraceExit returns its parameter in r0.
2300 __ push(r0);
2301 __ CallRuntime(Runtime::kTraceExit, 1);
2302 }
2303 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2304 __ mov(sp, fp);
2305 __ ldm(ia_w, sp, fp.bit() | lr.bit());
2306 __ add(sp, sp, Operand(sp_delta));
2307 __ Jump(lr);
2308 }
2309
2310
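// Loads the value of a global property cell. When a hole check is required,
// reading the hole means the cell no longer holds a live value (e.g. the
// property was deleted), so the code deoptimizes instead of returning it.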
2311 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2312 Register result = ToRegister(instr->result());
2313 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2314 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2315 if (instr->hydrogen()->RequiresHoleCheck()) {
2316 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2317 __ cmp(result, ip);
2318 DeoptimizeIf(eq, instr->environment());
2319 }
2320 }
2321
2322
2323 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2324 ASSERT(ToRegister(instr->global_object()).is(r0));
2325 ASSERT(ToRegister(instr->result()).is(r0));
2326
2327 __ mov(r2, Operand(instr->name()));
2328 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2329 : RelocInfo::CODE_TARGET_CONTEXT;
2330 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2331 CallCode(ic, mode, instr);
2332 }
2333
2334
2335 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2336 Register value = ToRegister(instr->value());
2337 Register cell = scratch0();
2338
2339 // Load the cell.
2340 __ mov(cell, Operand(instr->hydrogen()->cell()));
2341
2342 // If the cell we are storing to contains the hole it could have
2343 // been deleted from the property dictionary. In that case, we need
2344 // to update the property details in the property dictionary to mark
2345 // it as no longer deleted.
2346 if (instr->hydrogen()->RequiresHoleCheck()) {
2347 // We use a temp to check the payload (CompareRoot might clobber ip).
2348 Register payload = ToRegister(instr->TempAt(0));
2349 __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2350 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2351 DeoptimizeIf(eq, instr->environment());
2352 }
2353
2354 // Store the value.
2355 __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2356 // Cells are always rescanned, so no write barrier here.
2357 }
2358
2359
2360 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2361 ASSERT(ToRegister(instr->global_object()).is(r1));
2362 ASSERT(ToRegister(instr->value()).is(r0));
2363
2364 __ mov(r2, Operand(instr->name()));
2365 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2366 ? isolate()->builtins()->StoreIC_Initialize_Strict()
2367 : isolate()->builtins()->StoreIC_Initialize();
2368 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2369 }
2370
2371
2372 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2373 Register context = ToRegister(instr->context());
2374 Register result = ToRegister(instr->result());
2375 __ ldr(result, ContextOperand(context, instr->slot_index()));
2376 if (instr->hydrogen()->RequiresHoleCheck()) {
2377 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2378 __ cmp(result, ip);
2379 if (instr->hydrogen()->DeoptimizesOnHole()) {
2380 DeoptimizeIf(eq, instr->environment());
2381 } else {
2382 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2383 }
2384 }
2385 }
2386
2387
2388 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2389 Register context = ToRegister(instr->context());
2390 Register value = ToRegister(instr->value());
2391 Register scratch = scratch0();
2392 MemOperand target = ContextOperand(context, instr->slot_index());
2393
2394 Label skip_assignment;
2395
2396 if (instr->hydrogen()->RequiresHoleCheck()) {
2397 __ ldr(scratch, target);
2398 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2399 __ cmp(scratch, ip);
2400 if (instr->hydrogen()->DeoptimizesOnHole()) {
2401 DeoptimizeIf(eq, instr->environment());
2402 } else {
2403 __ b(ne, &skip_assignment);
2404 }
2405 }
2406
2407 __ str(value, target);
2408 if (instr->hydrogen()->NeedsWriteBarrier()) {
2409 HType type = instr->hydrogen()->value()->type();
2410 SmiCheck check_needed =
2411 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2412 __ RecordWriteContextSlot(context,
2413 target.offset(),
2414 value,
2415 scratch,
2416 kLRHasBeenSaved,
2417 kSaveFPRegs,
2418 EMIT_REMEMBERED_SET,
2419 check_needed);
2420 }
2421
2422 __ bind(&skip_assignment);
2423 }
2424
2425
2426 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2427 Register object = ToRegister(instr->InputAt(0));
2428 Register result = ToRegister(instr->result());
2429 if (instr->hydrogen()->is_in_object()) {
2430 __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2431 } else {
2432 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2433 __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2434 }
2435 }
2436
2437
2438 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2439 Register object,
2440 Handle<Map> type,
2441 Handle<String> name) {
2442 LookupResult lookup(isolate());
2443 type->LookupInDescriptors(NULL, *name, &lookup);
2444 ASSERT(lookup.IsFound() &&
2445 (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2446 if (lookup.type() == FIELD) {
2447 int index = lookup.GetLocalFieldIndexFromMap(*type);
2448 int offset = index * kPointerSize;
2449 if (index < 0) {
2450 // Negative property indices are in-object properties, indexed
2451 // from the end of the fixed part of the object.
2452 __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
2453 } else {
2454 // Non-negative property indices are in the properties array.
2455 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2456 __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2457 }
2458 } else {
2459 Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2460 __ LoadHeapObject(result, function);
2461 }
2462 }
2463
2464
2465 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2466 Register object = ToRegister(instr->object());
2467 Register result = ToRegister(instr->result());
2468 Register scratch = scratch0();
2469 int map_count = instr->hydrogen()->types()->length();
2470 Handle<String> name = instr->hydrogen()->name();
2471 if (map_count == 0) {
2472 ASSERT(instr->hydrogen()->need_generic());
2473 __ mov(r2, Operand(name));
2474 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2475 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2476 } else {
2477 Label done;
2478 __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2479 for (int i = 0; i < map_count - 1; ++i) {
2480 Handle<Map> map = instr->hydrogen()->types()->at(i);
2481 Label next;
2482 __ cmp(scratch, Operand(map));
2483 __ b(ne, &next);
2484 EmitLoadFieldOrConstantFunction(result, object, map, name);
2485 __ b(&done);
2486 __ bind(&next);
2487 }
2488 Handle<Map> map = instr->hydrogen()->types()->last();
2489 __ cmp(scratch, Operand(map));
2490 if (instr->hydrogen()->need_generic()) {
2491 Label generic;
2492 __ b(ne, &generic);
2493 EmitLoadFieldOrConstantFunction(result, object, map, name);
2494 __ b(&done);
2495 __ bind(&generic);
2496 __ mov(r2, Operand(name));
2497 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2498 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2499 } else {
2500 DeoptimizeIf(ne, instr->environment());
2501 EmitLoadFieldOrConstantFunction(result, object, map, name);
2502 }
2503 __ bind(&done);
2504 }
2505 }
2506
2507
2508 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2509 ASSERT(ToRegister(instr->object()).is(r0));
2510 ASSERT(ToRegister(instr->result()).is(r0));
2511
2512 // Name is always in r2.
2513 __ mov(r2, Operand(instr->name()));
2514 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2515 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2516 }
2517
2518
2519 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2520 Register scratch = scratch0();
2521 Register function = ToRegister(instr->function());
2522 Register result = ToRegister(instr->result());
2523
2524 // Check that the function really is a function. Load map into the
2525 // result register.
2526 __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2527 DeoptimizeIf(ne, instr->environment());
2528
2529 // Make sure that the function has an instance prototype.
2530 Label non_instance;
2531 __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2532 __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2533 __ b(ne, &non_instance);
2534
2535 // Get the prototype or initial map from the function.
2536 __ ldr(result,
2537 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2538
2539 // Check that the function has a prototype or an initial map.
2540 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2541 __ cmp(result, ip);
2542 DeoptimizeIf(eq, instr->environment());
2543
2544 // If the function does not have an initial map, we're done.
2545 Label done;
2546 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2547 __ b(ne, &done);
2548
2549 // Get the prototype from the initial map.
2550 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2551 __ jmp(&done);
2552
2553 // Non-instance prototype: Fetch prototype from constructor field
2554 // in initial map.
2555 __ bind(&non_instance);
2556 __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2557
2558 // All done.
2559 __ bind(&done);
2560 }
2561
2562
2563 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2564 Register result = ToRegister(instr->result());
2565 Register input = ToRegister(instr->InputAt(0));
2566 Register scratch = scratch0();
2567
2568 __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
2569 if (FLAG_debug_code) {
2570 Label done, fail;
2571 __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2572 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2573 __ cmp(scratch, ip);
2574 __ b(eq, &done);
2575 __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2576 __ cmp(scratch, ip);
2577 __ b(eq, &done);
2578     // |scratch| still contains the map of the elements object.
2579 __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2580 __ ubfx(scratch, scratch, Map::kElementsKindShift,
2581 Map::kElementsKindBitCount);
2582 __ cmp(scratch, Operand(FAST_ELEMENTS));
2583 __ b(eq, &done);
2584 __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2585 __ b(lt, &fail);
2586 __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2587 __ b(le, &done);
2588 __ bind(&fail);
2589 __ Abort("Check for fast or external elements failed.");
2590 __ bind(&done);
2591 }
2592 }
2593
2594
2595 void LCodeGen::DoLoadExternalArrayPointer(
2596 LLoadExternalArrayPointer* instr) {
2597 Register to_reg = ToRegister(instr->result());
2598 Register from_reg = ToRegister(instr->InputAt(0));
2599 __ ldr(to_reg, FieldMemOperand(from_reg,
2600 ExternalArray::kExternalPointerOffset));
2601 }
2602
2603
2604 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2605 Register arguments = ToRegister(instr->arguments());
2606 Register length = ToRegister(instr->length());
2607 Register index = ToRegister(instr->index());
2608 Register result = ToRegister(instr->result());
2609
2610   // Bail out if index is not a valid argument index. The unsigned check also
2611   // catches negative indices for free.
2612 __ sub(length, length, index, SetCC);
2613 DeoptimizeIf(ls, instr->environment());
2614
2615 // There are two words between the frame pointer and the last argument.
2616   // Subtracting from length accounts for one of them; add one more.
2617 __ add(length, length, Operand(1));
2618 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2619 }
2620
2621
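// Keyed load from a fast (tagged) elements backing store. The element address
// is elements + key * kPointerSize, with the FixedArray header accounted for
// by the FieldMemOperand below. If the hydrogen instruction requires a hole
// check, loading the hole value triggers a deoptimization.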
2622 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2623 Register elements = ToRegister(instr->elements());
2624 Register key = EmitLoadRegister(instr->key(), scratch0());
2625 Register result = ToRegister(instr->result());
2626 Register scratch = scratch0();
2627
2628 // Load the result.
2629 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2630 __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2631
2632 // Check for the hole value.
2633 if (instr->hydrogen()->RequiresHoleCheck()) {
2634 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2635 __ cmp(result, scratch);
2636 DeoptimizeIf(eq, instr->environment());
2637 }
2638 }
2639
2640
2641 void LCodeGen::DoLoadKeyedFastDoubleElement(
2642 LLoadKeyedFastDoubleElement* instr) {
2643 Register elements = ToRegister(instr->elements());
2644 bool key_is_constant = instr->key()->IsConstantOperand();
2645 Register key = no_reg;
2646 DwVfpRegister result = ToDoubleRegister(instr->result());
2647 Register scratch = scratch0();
2648
2649 int shift_size =
2650 ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2651 int constant_key = 0;
2652 if (key_is_constant) {
2653 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2654 if (constant_key & 0xF0000000) {
2655 Abort("array index constant value too big.");
2656 }
2657 } else {
2658 key = ToRegister(instr->key());
2659 }
2660
2661 Operand operand = key_is_constant
2662 ? Operand(constant_key * (1 << shift_size) +
2663 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
2664 : Operand(key, LSL, shift_size);
2665 __ add(elements, elements, operand);
2666 if (!key_is_constant) {
2667 __ add(elements, elements,
2668 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2669 }
2670
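// The hole in a double array is a NaN with a distinguished upper word, so
// only the upper 32 bits of the element are inspected; the offset of
// sizeof(kHoleNanLower32) skips the lower word of the little-endian double.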
2671 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2672 __ cmp(scratch, Operand(kHoleNanUpper32));
2673 DeoptimizeIf(eq, instr->environment());
2674
2675 __ vldr(result, elements, 0);
2676 }
2677
2678
2679 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2680 LLoadKeyedSpecializedArrayElement* instr) {
2681 Register external_pointer = ToRegister(instr->external_pointer());
2682 Register key = no_reg;
2683 ElementsKind elements_kind = instr->elements_kind();
2684 bool key_is_constant = instr->key()->IsConstantOperand();
2685 int constant_key = 0;
2686 if (key_is_constant) {
2687 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2688 if (constant_key & 0xF0000000) {
2689 Abort("array index constant value too big.");
2690 }
2691 } else {
2692 key = ToRegister(instr->key());
2693 }
2694 int shift_size = ElementsKindToShiftSize(elements_kind);
2695
2696 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2697 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2698 CpuFeatures::Scope scope(VFP3);
2699 DwVfpRegister result = ToDoubleRegister(instr->result());
2700 Operand operand = key_is_constant
2701 ? Operand(constant_key * (1 << shift_size))
2702 : Operand(key, LSL, shift_size);
2703 __ add(scratch0(), external_pointer, operand);
2704 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2705 __ vldr(result.low(), scratch0(), 0);
2706 __ vcvt_f64_f32(result, result.low());
2707 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2708 __ vldr(result, scratch0(), 0);
2709 }
2710 } else {
2711 Register result = ToRegister(instr->result());
2712 MemOperand mem_operand(key_is_constant
2713 ? MemOperand(external_pointer, constant_key * (1 << shift_size))
2714 : MemOperand(external_pointer, key, LSL, shift_size));
2715 switch (elements_kind) {
2716 case EXTERNAL_BYTE_ELEMENTS:
2717 __ ldrsb(result, mem_operand);
2718 break;
2719 case EXTERNAL_PIXEL_ELEMENTS:
2720 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2721 __ ldrb(result, mem_operand);
2722 break;
2723 case EXTERNAL_SHORT_ELEMENTS:
2724 __ ldrsh(result, mem_operand);
2725 break;
2726 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2727 __ ldrh(result, mem_operand);
2728 break;
2729 case EXTERNAL_INT_ELEMENTS:
2730 __ ldr(result, mem_operand);
2731 break;
2732 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2733 __ ldr(result, mem_operand);
2734 __ cmp(result, Operand(0x80000000));
2735 // TODO(danno): we could be more clever here, perhaps having a special
2736 // version of the stub that detects if the overflow case actually
2737 // happens, and generate code that returns a double rather than int.
2738 DeoptimizeIf(cs, instr->environment());
2739 break;
2740 case EXTERNAL_FLOAT_ELEMENTS:
2741 case EXTERNAL_DOUBLE_ELEMENTS:
2742 case FAST_DOUBLE_ELEMENTS:
2743 case FAST_ELEMENTS:
2744 case FAST_SMI_ONLY_ELEMENTS:
2745 case DICTIONARY_ELEMENTS:
2746 case NON_STRICT_ARGUMENTS_ELEMENTS:
2747 UNREACHABLE();
2748 break;
2749 }
2750 }
2751 }
2752
2753
2754 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2755 ASSERT(ToRegister(instr->object()).is(r1));
2756 ASSERT(ToRegister(instr->key()).is(r0));
2757
2758 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2759 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2760 }
2761
2762
2763 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2764 Register scratch = scratch0();
2765 Register result = ToRegister(instr->result());
2766
2767 // Check if the calling frame is an arguments adaptor frame.
2768 Label done, adapted;
2769 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2770 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2771 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2772
2773 // Result is the frame pointer for the frame if not adapted and for the real
2774 // frame below the adaptor frame if adapted.
2775 __ mov(result, fp, LeaveCC, ne);
2776 __ mov(result, scratch, LeaveCC, eq);
2777 }
2778
2779
2780 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2781 Register elem = ToRegister(instr->InputAt(0));
2782 Register result = ToRegister(instr->result());
2783
2784 Label done;
2785
2786   // If there is no arguments adaptor frame, the number of arguments is fixed.
2787 __ cmp(fp, elem);
2788 __ mov(result, Operand(scope()->num_parameters()));
2789 __ b(eq, &done);
2790
2791 // Arguments adaptor frame present. Get argument length from there.
2792 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2793 __ ldr(result,
2794 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2795 __ SmiUntag(result);
2796
2797 // Argument length is in result register.
2798 __ bind(&done);
2799 }
2800
2801
2802 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2803 Register receiver = ToRegister(instr->receiver());
2804 Register function = ToRegister(instr->function());
2805 Register scratch = scratch0();
2806
2807 // If the receiver is null or undefined, we have to pass the global
2808 // object as a receiver to normal functions. Values have to be
2809 // passed unchanged to builtins and strict-mode functions.
2810 Label global_object, receiver_ok;
2811
2812 // Do not transform the receiver to object for strict mode
2813 // functions.
2814 __ ldr(scratch,
2815 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2816 __ ldr(scratch,
2817 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2818 __ tst(scratch,
2819 Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
2820 __ b(ne, &receiver_ok);
2821
2822 // Do not transform the receiver to object for builtins.
2823 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2824 __ b(ne, &receiver_ok);
2825
2826 // Normal function. Replace undefined or null with global receiver.
2827 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2828 __ cmp(receiver, scratch);
2829 __ b(eq, &global_object);
2830 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2831 __ cmp(receiver, scratch);
2832 __ b(eq, &global_object);
2833
2834 // Deoptimize if the receiver is not a JS object.
2835 __ tst(receiver, Operand(kSmiTagMask));
2836 DeoptimizeIf(eq, instr->environment());
2837 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
2838 DeoptimizeIf(lt, instr->environment());
2839 __ jmp(&receiver_ok);
2840
2841 __ bind(&global_object);
2842 __ ldr(receiver, GlobalObjectOperand());
2843 __ ldr(receiver,
2844 FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2845 __ bind(&receiver_ok);
2846 }
2847
2848
2849 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2850 Register receiver = ToRegister(instr->receiver());
2851 Register function = ToRegister(instr->function());
2852 Register length = ToRegister(instr->length());
2853 Register elements = ToRegister(instr->elements());
2854 Register scratch = scratch0();
2855 ASSERT(receiver.is(r0)); // Used for parameter count.
2856 ASSERT(function.is(r1)); // Required by InvokeFunction.
2857 ASSERT(ToRegister(instr->result()).is(r0));
2858
2859 // Copy the arguments to this function possibly from the
2860 // adaptor frame below it.
2861 const uint32_t kArgumentsLimit = 1 * KB;
2862 __ cmp(length, Operand(kArgumentsLimit));
2863 DeoptimizeIf(hi, instr->environment());
2864
2865 // Push the receiver and use the register to keep the original
2866 // number of arguments.
2867 __ push(receiver);
2868 __ mov(receiver, length);
2869 // The arguments are at a one pointer size offset from elements.
2870 __ add(elements, elements, Operand(1 * kPointerSize));
2871
2872 // Loop through the arguments pushing them onto the execution
2873 // stack.
2874 Label invoke, loop;
2875 // length is a small non-negative integer, due to the test above.
2876 __ cmp(length, Operand(0));
2877 __ b(eq, &invoke);
2878 __ bind(&loop);
2879 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
2880 __ push(scratch);
2881 __ sub(length, length, Operand(1), SetCC);
2882 __ b(ne, &loop);
2883
2884 __ bind(&invoke);
2885 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
2886 LPointerMap* pointers = instr->pointer_map();
2887 RecordPosition(pointers->position());
2888 SafepointGenerator safepoint_generator(
2889 this, pointers, Safepoint::kLazyDeopt);
2890 // The number of arguments is stored in receiver which is r0, as expected
2891 // by InvokeFunction.
2892 ParameterCount actual(receiver);
2893 __ InvokeFunction(function, actual, CALL_FUNCTION,
2894 safepoint_generator, CALL_AS_METHOD);
2895 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2896 }
2897
2898
2899 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2900 LOperand* argument = instr->InputAt(0);
2901 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2902 Abort("DoPushArgument not implemented for double type.");
2903 } else {
2904 Register argument_reg = EmitLoadRegister(argument, ip);
2905 __ push(argument_reg);
2906 }
2907 }
2908
2909
2910 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2911 Register result = ToRegister(instr->result());
2912 __ LoadHeapObject(result, instr->hydrogen()->closure());
2913 }
2914
2915
2916 void LCodeGen::DoContext(LContext* instr) {
2917 Register result = ToRegister(instr->result());
2918 __ mov(result, cp);
2919 }
2920
2921
2922 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2923 Register context = ToRegister(instr->context());
2924 Register result = ToRegister(instr->result());
2925 __ ldr(result,
2926 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2927 }
2928
2929
2930 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2931 __ push(cp); // The context is the first argument.
2932 __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
2933 __ push(scratch0());
2934 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
2935 __ push(scratch0());
2936 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
2937 }
2938
2939
2940 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2941 Register result = ToRegister(instr->result());
2942 __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
2943 }
2944
2945
2946 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2947 Register global = ToRegister(instr->global());
2948 Register result = ToRegister(instr->result());
2949 __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2950 }
2951
2952
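// Calls a JSFunction that is known at compile time. If the arity matches the
// function's formal parameter count (or no adaptation is needed), the code
// entry is called directly; otherwise the generic InvokeFunction path is
// used, which goes through the arguments adaptor if necessary.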
2953 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2954 int arity,
2955 LInstruction* instr,
2956 CallKind call_kind) {
2957 bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
2958 function->shared()->formal_parameter_count() == arity;
2959
2960 LPointerMap* pointers = instr->pointer_map();
2961 RecordPosition(pointers->position());
2962
2963 if (can_invoke_directly) {
2964 __ LoadHeapObject(r1, function);
2965 // Change context if needed.
2966 bool change_context =
2967 (info()->closure()->context() != function->context()) ||
2968 scope()->contains_with() ||
2969 (scope()->num_heap_slots() > 0);
2970 if (change_context) {
2971 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2972 }
2973
2974 // Set r0 to arguments count if adaption is not needed. Assumes that r0
2975 // is available to write to at this point.
2976 if (!function->NeedsArgumentsAdaption()) {
2977 __ mov(r0, Operand(arity));
2978 }
2979
2980 // Invoke function.
2981 __ SetCallKind(r5, call_kind);
2982 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2983 __ Call(ip);
2984
2985 // Set up deoptimization.
2986 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2987 } else {
2988 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2989 ParameterCount count(arity);
2990 __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
2991 }
2992
2993 // Restore context.
2994 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2995 }
2996
2997
2998 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2999 ASSERT(ToRegister(instr->result()).is(r0));
3000 CallKnownFunction(instr->function(),
3001 instr->arity(),
3002 instr,
3003 CALL_AS_METHOD);
3004 }
3005
3006
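// Deferred code for Math.abs on a tagged value, reached when the input is
// not a smi. The input must be a heap number; a new heap number is allocated
// (via the runtime if inline allocation fails) and filled with the input's
// mantissa and an exponent word whose sign bit has been cleared.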
3007 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3008 Register input = ToRegister(instr->InputAt(0));
3009 Register result = ToRegister(instr->result());
3010 Register scratch = scratch0();
3011
3012 // Deoptimize if not a heap number.
3013 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3014 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3015 __ cmp(scratch, Operand(ip));
3016 DeoptimizeIf(ne, instr->environment());
3017
3018 Label done;
3019 Register exponent = scratch0();
3020 scratch = no_reg;
3021 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3022 // Check the sign of the argument. If the argument is positive, just
3023 // return it.
3024 __ tst(exponent, Operand(HeapNumber::kSignMask));
3025 // Move the input to the result if necessary.
3026 __ Move(result, input);
3027 __ b(eq, &done);
3028
3029 // Input is negative. Reverse its sign.
3030 // Preserve the value of all registers.
3031 {
3032 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3033
3034 // Registers were saved at the safepoint, so we can use
3035 // many scratch registers.
3036 Register tmp1 = input.is(r1) ? r0 : r1;
3037 Register tmp2 = input.is(r2) ? r0 : r2;
3038 Register tmp3 = input.is(r3) ? r0 : r3;
3039 Register tmp4 = input.is(r4) ? r0 : r4;
3040
3041 // exponent: floating point exponent value.
3042
3043 Label allocated, slow;
3044 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3045 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3046 __ b(&allocated);
3047
3048 // Slow case: Call the runtime system to do the number allocation.
3049 __ bind(&slow);
3050
3051 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3052     // Set the pointer to the new heap number in tmp1.
3053 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3054 // Restore input_reg after call to runtime.
3055 __ LoadFromSafepointRegisterSlot(input, input);
3056 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3057
3058 __ bind(&allocated);
3059 // exponent: floating point exponent value.
3060 // tmp1: allocated heap number.
3061 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3062 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3063 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3064 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3065
3066 __ StoreToSafepointRegisterSlot(tmp1, result);
3067 }
3068
3069 __ bind(&done);
3070 }
3071
3072
3073 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3074 Register input = ToRegister(instr->InputAt(0));
3075 Register result = ToRegister(instr->result());
3076 __ cmp(input, Operand(0));
3077 __ Move(result, input, pl);
3078 // We can make rsb conditional because the previous cmp instruction
3079 // will clear the V (overflow) flag and rsb won't set this flag
3080 // if input is positive.
3081 __ rsb(result, input, Operand(0), SetCC, mi);
3082 // Deoptimize on overflow.
3083 DeoptimizeIf(vs, instr->environment());
3084 }
3085
3086
3087 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3088 // Class for deferred case.
3089 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3090 public:
3091 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3092 LUnaryMathOperation* instr)
3093 : LDeferredCode(codegen), instr_(instr) { }
3094 virtual void Generate() {
3095 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3096 }
3097 virtual LInstruction* instr() { return instr_; }
3098 private:
3099 LUnaryMathOperation* instr_;
3100 };
3101
3102 Representation r = instr->hydrogen()->value()->representation();
3103 if (r.IsDouble()) {
3104 DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
3105 DwVfpRegister result = ToDoubleRegister(instr->result());
3106 __ vabs(result, input);
3107 } else if (r.IsInteger32()) {
3108 EmitIntegerMathAbs(instr);
3109 } else {
3110 // Representation is tagged.
3111 DeferredMathAbsTaggedHeapNumber* deferred =
3112 new DeferredMathAbsTaggedHeapNumber(this, instr);
3113 Register input = ToRegister(instr->InputAt(0));
3114 // Smi check.
3115 __ JumpIfNotSmi(input, deferred->entry());
3116 // If smi, handle it directly.
3117 EmitIntegerMathAbs(instr);
3118 __ bind(deferred->exit());
3119 }
3120 }
3121
3122
3123 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3124 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3125 Register result = ToRegister(instr->result());
3126 SwVfpRegister single_scratch = double_scratch0().low();
3127 Register scratch1 = scratch0();
3128 Register scratch2 = ToRegister(instr->TempAt(0));
3129
3130 __ EmitVFPTruncate(kRoundToMinusInf,
3131 single_scratch,
3132 input,
3133 scratch1,
3134 scratch2);
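// The condition is ne if the VFP conversion raised an exception (for
// example a NaN or out-of-range input), in which case we deoptimize.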
3135 DeoptimizeIf(ne, instr->environment());
3136
3137 // Move the result from the VFP scratch register into the result register.
3138 __ vmov(result, single_scratch);
3139
3140 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3141 // Test for -0.
3142 Label done;
3143 __ cmp(result, Operand(0));
3144 __ b(ne, &done);
3145 __ vmov(scratch1, input.high());
3146 __ tst(scratch1, Operand(HeapNumber::kSignMask));
3147 DeoptimizeIf(ne, instr->environment());
3148 __ bind(&done);
3149 }
3150 }
3151
3152
3153 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3154 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3155 Register result = ToRegister(instr->result());
3156 Register scratch = scratch0();
3157 Label done, check_sign_on_zero;
3158
3159 // Extract exponent bits.
3160 __ vmov(result, input.high());
3161 __ ubfx(scratch,
3162 result,
3163 HeapNumber::kExponentShift,
3164 HeapNumber::kExponentBits);
3165
3166 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
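// A biased exponent of at most (kExponentBias - 2) means the unbiased
// exponent is -2 or less, i.e. the magnitude of the input is below 0.5.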
3167 __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
3168 __ mov(result, Operand(0), LeaveCC, le);
3169 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3170 __ b(le, &check_sign_on_zero);
3171 } else {
3172 __ b(le, &done);
3173 }
3174
3175 // The following conversion will not work with numbers
3176 // outside of ]-2^32, 2^32[.
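// A biased exponent of (kExponentBias + 32) or more means the magnitude is
// at least 2^32.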
3177 __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
3178 DeoptimizeIf(ge, instr->environment());
3179
3180 // Save the original sign for later comparison.
3181 __ and_(scratch, result, Operand(HeapNumber::kSignMask));
3182
3183 __ Vmov(double_scratch0(), 0.5);
3184 __ vadd(double_scratch0(), input, double_scratch0());
3185
3186 // Check sign of the result: if the sign changed, the input
3187 // value was in ]-0.5, 0[ and the result should be -0.
3188 __ vmov(result, double_scratch0().high());
3189 __ eor(result, result, Operand(scratch), SetCC);
3190 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3191 DeoptimizeIf(mi, instr->environment());
3192 } else {
3193 __ mov(result, Operand(0), LeaveCC, mi);
3194 __ b(mi, &done);
3195 }
3196
3197 __ EmitVFPTruncate(kRoundToMinusInf,
3198 double_scratch0().low(),
3199 double_scratch0(),
3200 result,
3201 scratch);
3202 DeoptimizeIf(ne, instr->environment());
3203 __ vmov(result, double_scratch0().low());
3204
3205 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3206 // Test for -0.
3207 __ cmp(result, Operand(0));
3208 __ b(ne, &done);
3209 __ bind(&check_sign_on_zero);
3210 __ vmov(scratch, input.high());
3211 __ tst(scratch, Operand(HeapNumber::kSignMask));
3212 DeoptimizeIf(ne, instr->environment());
3213 }
3214 __ bind(&done);
3215 }
3216
3217
3218 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3219 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3220 DoubleRegister result = ToDoubleRegister(instr->result());
3221 __ vsqrt(result, input);
3222 }
3223
3224
3225 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3226 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3227 DoubleRegister result = ToDoubleRegister(instr->result());
3228 DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3229
3230 // Note that according to ECMA-262 15.8.2.13:
3231 // Math.pow(-Infinity, 0.5) == Infinity
3232 // Math.sqrt(-Infinity) == NaN
3233 Label done;
3234 __ vmov(temp, -V8_INFINITY);
3235 __ VFPCompareAndSetFlags(input, temp);
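// If the input is -Infinity, negating temp (which holds -Infinity) yields
// the required +Infinity result directly.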
3236 __ vneg(result, temp, eq);
3237 __ b(&done, eq);
3238
3239 // Add +0 to convert -0 to +0.
3240 __ vadd(result, input, kDoubleRegZero);
3241 __ vsqrt(result, result);
3242 __ bind(&done);
3243 }
3244
3245
3246 void LCodeGen::DoPower(LPower* instr) {
3247 Representation exponent_type = instr->hydrogen()->right()->representation();
3248 // Having marked this as a call, we can use any registers.
3249 // Just make sure that the input/output registers are the expected ones.
3250 ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3251 ToDoubleRegister(instr->InputAt(1)).is(d2));
3252 ASSERT(!instr->InputAt(1)->IsRegister() ||
3253 ToRegister(instr->InputAt(1)).is(r2));
3254 ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
3255 ASSERT(ToDoubleRegister(instr->result()).is(d3));
3256
3257 if (exponent_type.IsTagged()) {
3258 Label no_deopt;
3259 __ JumpIfSmi(r2, &no_deopt);
3260 __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
3261 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3262 __ cmp(r7, Operand(ip));
3263 DeoptimizeIf(ne, instr->environment());
3264 __ bind(&no_deopt);
3265 MathPowStub stub(MathPowStub::TAGGED);
3266 __ CallStub(&stub);
3267 } else if (exponent_type.IsInteger32()) {
3268 MathPowStub stub(MathPowStub::INTEGER);
3269 __ CallStub(&stub);
3270 } else {
3271 ASSERT(exponent_type.IsDouble());
3272 MathPowStub stub(MathPowStub::DOUBLE);
3273 __ CallStub(&stub);
3274 }
3275 }
3276
3277
3278 void LCodeGen::DoRandom(LRandom* instr) {
3279 class DeferredDoRandom: public LDeferredCode {
3280 public:
3281 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3282 : LDeferredCode(codegen), instr_(instr) { }
3283 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3284 virtual LInstruction* instr() { return instr_; }
3285 private:
3286 LRandom* instr_;
3287 };
3288
3289 DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
3290
3291 // Having marked this instruction as a call we can use any
3292 // registers.
3293 ASSERT(ToDoubleRegister(instr->result()).is(d7));
3294 ASSERT(ToRegister(instr->InputAt(0)).is(r0));
3295
3296 static const int kSeedSize = sizeof(uint32_t);
3297 STATIC_ASSERT(kPointerSize == kSeedSize);
3298
3299 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
3300 static const int kRandomSeedOffset =
3301 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3302 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
3303 // r2: FixedArray of the global context's random seeds
3304
3305 // Load state[0].
3306 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
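// A zero state[0] means the seeds have not been initialized yet; fall back
// to the runtime, which leaves fresh random bits in r0 (see DoDeferredRandom).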
3307 __ cmp(r1, Operand(0));
3308 __ b(eq, deferred->entry());
3309 // Load state[1].
3310 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3311 // r1: state[0].
3312 // r0: state[1].
3313
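// Both halves of the state are advanced with multiply-with-carry style
// steps and then combined below into a 32-bit random bit pattern.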
3314 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3315 __ and_(r3, r1, Operand(0xFFFF));
3316 __ mov(r4, Operand(18273));
3317 __ mul(r3, r3, r4);
3318 __ add(r1, r3, Operand(r1, LSR, 16));
3319 // Save state[0].
3320 __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3321
3322 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3323 __ and_(r3, r0, Operand(0xFFFF));
3324 __ mov(r4, Operand(36969));
3325 __ mul(r3, r3, r4);
3326 __ add(r0, r3, Operand(r0, LSR, 16));
3327 // Save state[1].
3328 __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3329
3330 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3331 __ and_(r0, r0, Operand(0x3FFFF));
3332 __ add(r0, r0, Operand(r1, LSL, 14));
3333
3334 __ bind(deferred->exit());
3335 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3336 // Create this constant using mov/orr to avoid PC relative load.
3337 __ mov(r1, Operand(0x41000000));
3338 __ orr(r1, r1, Operand(0x300000));
3339 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3340 __ vmov(d7, r0, r1);
3341 // Move 0x4130000000000000 to VFP.
3342 __ mov(r0, Operand(0, RelocInfo::NONE));
3343 __ vmov(d8, r0, r1);
3344 // Subtract and store the result in the heap number.
3345 __ vsub(d7, d7, d8);
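// d7 now holds (2^20 + random * 2^-32) - 2^20 = random * 2^-32, which lies
// in [0, 1).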
3346 }
3347
3348
3349 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3350 __ PrepareCallCFunction(1, scratch0());
3351 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3352 // Return value is in r0.
3353 }
3354
3355
3356 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3357 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3358 TranscendentalCacheStub stub(TranscendentalCache::LOG,
3359 TranscendentalCacheStub::UNTAGGED);
3360 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3361 }
3362
3363
3364 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3365 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3366 TranscendentalCacheStub stub(TranscendentalCache::TAN,
3367 TranscendentalCacheStub::UNTAGGED);
3368 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3369 }
3370
3371
3372 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3373 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3374 TranscendentalCacheStub stub(TranscendentalCache::COS,
3375 TranscendentalCacheStub::UNTAGGED);
3376 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3377 }
3378
3379
3380 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3381 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3382 TranscendentalCacheStub stub(TranscendentalCache::SIN,
3383 TranscendentalCacheStub::UNTAGGED);
3384 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3385 }
3386
3387
3388 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3389 switch (instr->op()) {
3390 case kMathAbs:
3391 DoMathAbs(instr);
3392 break;
3393 case kMathFloor:
3394 DoMathFloor(instr);
3395 break;
3396 case kMathRound:
3397 DoMathRound(instr);
3398 break;
3399 case kMathSqrt:
3400 DoMathSqrt(instr);
3401 break;
3402 case kMathPowHalf:
3403 DoMathPowHalf(instr);
3404 break;
3405 case kMathCos:
3406 DoMathCos(instr);
3407 break;
3408 case kMathSin:
3409 DoMathSin(instr);
3410 break;
3411 case kMathTan:
3412 DoMathTan(instr);
3413 break;
3414 case kMathLog:
3415 DoMathLog(instr);
3416 break;
3417 default:
3418 Abort("Unimplemented type of LUnaryMathOperation.");
3419 UNREACHABLE();
3420 }
3421 }
3422
3423
3424 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3425 ASSERT(ToRegister(instr->function()).is(r1));
3426 ASSERT(instr->HasPointerMap());
3427 ASSERT(instr->HasDeoptimizationEnvironment());
3428 LPointerMap* pointers = instr->pointer_map();
3429 RecordPosition(pointers->position());
3430 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3431 ParameterCount count(instr->arity());
3432 __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3433 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3434 }
3435
3436
3437 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3438 ASSERT(ToRegister(instr->result()).is(r0));
3439
3440 int arity = instr->arity();
3441 Handle<Code> ic =
3442 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3443 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3444 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3445 }
3446
3447
3448 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3449 ASSERT(ToRegister(instr->result()).is(r0));
3450
3451 int arity = instr->arity();
3452 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3453 Handle<Code> ic =
3454 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3455 __ mov(r2, Operand(instr->name()));
3456 CallCode(ic, mode, instr);
3457 // Restore context register.
3458 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3459 }
3460
3461
3462 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3463 ASSERT(ToRegister(instr->function()).is(r1));
3464 ASSERT(ToRegister(instr->result()).is(r0));
3465
3466 int arity = instr->arity();
3467 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3468 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3469 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3470 }
3471
3472
3473 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3474 ASSERT(ToRegister(instr->result()).is(r0));
3475
3476 int arity = instr->arity();
3477 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3478 Handle<Code> ic =
3479 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3480 __ mov(r2, Operand(instr->name()));
3481 CallCode(ic, mode, instr);
3482 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3483 }
3484
3485
3486 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3487 ASSERT(ToRegister(instr->result()).is(r0));
3488 CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
3489 }
3490
3491
3492 void LCodeGen::DoCallNew(LCallNew* instr) {
3493 ASSERT(ToRegister(instr->InputAt(0)).is(r1));
3494 ASSERT(ToRegister(instr->result()).is(r0));
3495
3496 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3497 __ mov(r0, Operand(instr->arity()));
3498 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3499 }
3500
3501
3502 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3503 CallRuntime(instr->function(), instr->arity(), instr);
3504 }
3505
3506
3507 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3508 Register object = ToRegister(instr->object());
3509 Register value = ToRegister(instr->value());
3510 Register scratch = scratch0();
3511 int offset = instr->offset();
3512
3513 ASSERT(!object.is(value));
3514
3515 if (!instr->transition().is_null()) {
3516 __ mov(scratch, Operand(instr->transition()));
3517 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3518 }
3519
3520 // Do the store.
3521 HType type = instr->hydrogen()->value()->type();
3522 SmiCheck check_needed =
3523 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3524 if (instr->is_in_object()) {
3525 __ str(value, FieldMemOperand(object, offset));
3526 if (instr->hydrogen()->NeedsWriteBarrier()) {
3527 // Update the write barrier for the object for in-object properties.
3528 __ RecordWriteField(object,
3529 offset,
3530 value,
3531 scratch,
3532 kLRHasBeenSaved,
3533 kSaveFPRegs,
3534 EMIT_REMEMBERED_SET,
3535 check_needed);
3536 }
3537 } else {
3538 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3539 __ str(value, FieldMemOperand(scratch, offset));
3540 if (instr->hydrogen()->NeedsWriteBarrier()) {
3541 // Update the write barrier for the properties array.
3542 // object is used as a scratch register.
3543 __ RecordWriteField(scratch,
3544 offset,
3545 value,
3546 object,
3547 kLRHasBeenSaved,
3548 kSaveFPRegs,
3549 EMIT_REMEMBERED_SET,
3550 check_needed);
3551 }
3552 }
3553 }
3554
3555
3556 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3557 ASSERT(ToRegister(instr->object()).is(r1));
3558 ASSERT(ToRegister(instr->value()).is(r0));
3559
3560 // Name is always in r2.
3561 __ mov(r2, Operand(instr->name()));
3562 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3563 ? isolate()->builtins()->StoreIC_Initialize_Strict()
3564 : isolate()->builtins()->StoreIC_Initialize();
3565 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3566 }
3567
3568
3569 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3570 __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3571 DeoptimizeIf(hs, instr->environment());
3572 }
3573
3574
3575 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3576 Register value = ToRegister(instr->value());
3577 Register elements = ToRegister(instr->object());
3578 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3579 Register scratch = scratch0();
3580
3581 // Do the store.
3582 if (instr->key()->IsConstantOperand()) {
3583 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3584 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3585 int offset =
3586 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
3587 __ str(value, FieldMemOperand(elements, offset));
3588 } else {
3589 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3590 __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3591 }
3592
3593 if (instr->hydrogen()->NeedsWriteBarrier()) {
3594 HType type = instr->hydrogen()->value()->type();
3595 SmiCheck check_needed =
3596 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3597 // Compute address of modified element and store it into key register.
3598 __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3599 __ RecordWrite(elements,
3600 key,
3601 value,
3602 kLRHasBeenSaved,
3603 kSaveFPRegs,
3604 EMIT_REMEMBERED_SET,
3605 check_needed);
3606 }
3607 }
3608
3609
3610 void LCodeGen::DoStoreKeyedFastDoubleElement(
3611 LStoreKeyedFastDoubleElement* instr) {
3612 DwVfpRegister value = ToDoubleRegister(instr->value());
3613 Register elements = ToRegister(instr->elements());
3614 Register key = no_reg;
3615 Register scratch = scratch0();
3616 bool key_is_constant = instr->key()->IsConstantOperand();
3617 int constant_key = 0;
3618 Label not_nan;
3619
3620 // Calculate the effective address of the slot in the array to store the
3621 // double value.
3622 if (key_is_constant) {
3623 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3624 if (constant_key & 0xF0000000) {
3625 Abort("array index constant value too big.");
3626 }
3627 } else {
3628 key = ToRegister(instr->key());
3629 }
3630 int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3631 Operand operand = key_is_constant
3632 ? Operand(constant_key * (1 << shift_size) +
3633 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3634 : Operand(key, LSL, shift_size);
3635 __ add(scratch, elements, operand);
3636 if (!key_is_constant) {
3637 __ add(scratch, scratch,
3638 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3639 }
3640
3641 // Check for NaN. All NaNs must be canonicalized.
3642 __ VFPCompareAndSetFlags(value, value);
3643
3644 // Only load the canonical NaN if the comparison above set the overflow flag,
3645 __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
3646
3647 __ bind(¬_nan);
3648 __ vstr(value, scratch, 0);
3649 }
3650
3651
3652 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3653 LStoreKeyedSpecializedArrayElement* instr) {
3654
3655 Register external_pointer = ToRegister(instr->external_pointer());
3656 Register key = no_reg;
3657 ElementsKind elements_kind = instr->elements_kind();
3658 bool key_is_constant = instr->key()->IsConstantOperand();
3659 int constant_key = 0;
3660 if (key_is_constant) {
3661 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3662 if (constant_key & 0xF0000000) {
3663 Abort("array index constant value too big.");
3664 }
3665 } else {
3666 key = ToRegister(instr->key());
3667 }
3668 int shift_size = ElementsKindToShiftSize(elements_kind);
3669
3670 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3671 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3672 CpuFeatures::Scope scope(VFP3);
3673 DwVfpRegister value(ToDoubleRegister(instr->value()));
3674 Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
3675 : Operand(key, LSL, shift_size));
3676 __ add(scratch0(), external_pointer, operand);
3677 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3678 __ vcvt_f32_f64(double_scratch0().low(), value);
3679 __ vstr(double_scratch0().low(), scratch0(), 0);
3680 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3681 __ vstr(value, scratch0(), 0);
3682 }
3683 } else {
3684 Register value(ToRegister(instr->value()));
3685 MemOperand mem_operand(key_is_constant
3686 ? MemOperand(external_pointer, constant_key * (1 << shift_size))
3687 : MemOperand(external_pointer, key, LSL, shift_size));
3688 switch (elements_kind) {
3689 case EXTERNAL_PIXEL_ELEMENTS:
3690 case EXTERNAL_BYTE_ELEMENTS:
3691 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3692 __ strb(value, mem_operand);
3693 break;
3694 case EXTERNAL_SHORT_ELEMENTS:
3695 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3696 __ strh(value, mem_operand);
3697 break;
3698 case EXTERNAL_INT_ELEMENTS:
3699 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3700 __ str(value, mem_operand);
3701 break;
3702 case EXTERNAL_FLOAT_ELEMENTS:
3703 case EXTERNAL_DOUBLE_ELEMENTS:
3704 case FAST_DOUBLE_ELEMENTS:
3705 case FAST_ELEMENTS:
3706 case FAST_SMI_ONLY_ELEMENTS:
3707 case DICTIONARY_ELEMENTS:
3708 case NON_STRICT_ARGUMENTS_ELEMENTS:
3709 UNREACHABLE();
3710 break;
3711 }
3712 }
3713 }
3714
3715
3716 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3717 ASSERT(ToRegister(instr->object()).is(r2));
3718 ASSERT(ToRegister(instr->key()).is(r1));
3719 ASSERT(ToRegister(instr->value()).is(r0));
3720
3721 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3722 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3723 : isolate()->builtins()->KeyedStoreIC_Initialize();
3724 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3725 }
3726
3727
3728 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3729 Register object_reg = ToRegister(instr->object());
3730 Register new_map_reg = ToRegister(instr->new_map_reg());
3731 Register scratch = scratch0();
3732
3733 Handle<Map> from_map = instr->original_map();
3734 Handle<Map> to_map = instr->transitioned_map();
3735 ElementsKind from_kind = from_map->elements_kind();
3736 ElementsKind to_kind = to_map->elements_kind();
3737
3738 Label not_applicable;
3739 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3740 __ cmp(scratch, Operand(from_map));
3741 __ b(ne, ¬_applicable);
3742 __ mov(new_map_reg, Operand(to_map));
3743 if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
3744 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3745 // Write barrier.
3746 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3747 scratch, kLRHasBeenSaved, kDontSaveFPRegs);
3748 } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
3749 to_kind == FAST_DOUBLE_ELEMENTS) {
3750 Register fixed_object_reg = ToRegister(instr->temp_reg());
3751 ASSERT(fixed_object_reg.is(r2));
3752 ASSERT(new_map_reg.is(r3));
3753 __ mov(fixed_object_reg, object_reg);
3754 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3755 RelocInfo::CODE_TARGET, instr);
3756 } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
3757 Register fixed_object_reg = ToRegister(instr->temp_reg());
3758 ASSERT(fixed_object_reg.is(r2));
3759 ASSERT(new_map_reg.is(r3));
3760 __ mov(fixed_object_reg, object_reg);
3761 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3762 RelocInfo::CODE_TARGET, instr);
3763 } else {
3764 UNREACHABLE();
3765 }
3766 __ bind(¬_applicable);
3767 }
3768
3769
3770 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3771 __ push(ToRegister(instr->left()));
3772 __ push(ToRegister(instr->right()));
3773 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3775 }
3776
3777
3778 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3779 class DeferredStringCharCodeAt: public LDeferredCode {
3780 public:
3781 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3782 : LDeferredCode(codegen), instr_(instr) { }
3783 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3784 virtual LInstruction* instr() { return instr_; }
3785 private:
3786 LStringCharCodeAt* instr_;
3787 };
3788
3789 DeferredStringCharCodeAt* deferred =
3790 new DeferredStringCharCodeAt(this, instr);
3791
3792 StringCharLoadGenerator::Generate(masm(),
3793 ToRegister(instr->string()),
3794 ToRegister(instr->index()),
3795 ToRegister(instr->result()),
3796 deferred->entry());
3797 __ bind(deferred->exit());
3798 }
3799
3800
3801 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3802 Register string = ToRegister(instr->string());
3803 Register result = ToRegister(instr->result());
3804 Register scratch = scratch0();
3805
3806 // TODO(3095996): Get rid of this. For now, we need to make the
3807 // result register contain a valid pointer because it is already
3808 // contained in the register pointer map.
3809 __ mov(result, Operand(0));
3810
3811 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3812 __ push(string);
3813 // Push the index as a smi. This is safe because of the checks in
3814 // DoStringCharCodeAt above.
3815 if (instr->index()->IsConstantOperand()) {
3816 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3817 __ mov(scratch, Operand(Smi::FromInt(const_index)));
3818 __ push(scratch);
3819 } else {
3820 Register index = ToRegister(instr->index());
3821 __ SmiTag(index);
3822 __ push(index);
3823 }
3824 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3825 if (FLAG_debug_code) {
3826 __ AbortIfNotSmi(r0);
3827 }
3828 __ SmiUntag(r0);
3829 __ StoreToSafepointRegisterSlot(r0, result);
3830 }
3831
3832
3833 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3834 class DeferredStringCharFromCode: public LDeferredCode {
3835 public:
3836 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3837 : LDeferredCode(codegen), instr_(instr) { }
3838 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3839 virtual LInstruction* instr() { return instr_; }
3840 private:
3841 LStringCharFromCode* instr_;
3842 };
3843
3844 DeferredStringCharFromCode* deferred =
3845 new DeferredStringCharFromCode(this, instr);
3846
3847 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3848 Register char_code = ToRegister(instr->char_code());
3849 Register result = ToRegister(instr->result());
3850 ASSERT(!char_code.is(result));
3851
3852 __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
3853 __ b(hi, deferred->entry());
3854 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3855 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
3856 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
3857 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3858 __ cmp(result, ip);
3859 __ b(eq, deferred->entry());
3860 __ bind(deferred->exit());
3861 }
3862
3863
3864 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3865 Register char_code = ToRegister(instr->char_code());
3866 Register result = ToRegister(instr->result());
3867
3868 // TODO(3095996): Get rid of this. For now, we need to make the
3869 // result register contain a valid pointer because it is already
3870 // contained in the register pointer map.
3871 __ mov(result, Operand(0));
3872
3873 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3874 __ SmiTag(char_code);
3875 __ push(char_code);
3876 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
3877 __ StoreToSafepointRegisterSlot(r0, result);
3878 }
3879
3880
3881 void LCodeGen::DoStringLength(LStringLength* instr) {
3882 Register string = ToRegister(instr->InputAt(0));
3883 Register result = ToRegister(instr->result());
3884 __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
3885 }
3886
3887
3888 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3889 LOperand* input = instr->InputAt(0);
3890 ASSERT(input->IsRegister() || input->IsStackSlot());
3891 LOperand* output = instr->result();
3892 ASSERT(output->IsDoubleRegister());
3893 SwVfpRegister single_scratch = double_scratch0().low();
3894 if (input->IsStackSlot()) {
3895 Register scratch = scratch0();
3896 __ ldr(scratch, ToMemOperand(input));
3897 __ vmov(single_scratch, scratch);
3898 } else {
3899 __ vmov(single_scratch, ToRegister(input));
3900 }
3901 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
3902 }
3903
3904
3905 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3906 class DeferredNumberTagI: public LDeferredCode {
3907 public:
3908 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3909 : LDeferredCode(codegen), instr_(instr) { }
3910 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
3911 virtual LInstruction* instr() { return instr_; }
3912 private:
3913 LNumberTagI* instr_;
3914 };
3915
3916 Register src = ToRegister(instr->InputAt(0));
3917 Register dst = ToRegister(instr->result());
3918
3919 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
3920 __ SmiTag(dst, src, SetCC);
3921 __ b(vs, deferred->entry());
3922 __ bind(deferred->exit());
3923 }
3924
3925
3926 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3927 Label slow;
3928 Register src = ToRegister(instr->InputAt(0));
3929 Register dst = ToRegister(instr->result());
3930 DoubleRegister dbl_scratch = double_scratch0();
3931 SwVfpRegister flt_scratch = dbl_scratch.low();
3932
3933 // Preserve the value of all registers.
3934 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3935
3936 // There was overflow, so bits 30 and 31 of the original integer
3937 // disagree. Try to allocate a heap number in new space and store
3938 // the value in there. If that fails, call the runtime system.
3939 Label done;
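// If dst aliases src, the register currently holds the overflowed SmiTag
// result: the value shifted left by one with the original sign bit lost.
// Untag it and flip the sign bit to recover the original integer.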
3940 if (dst.is(src)) {
3941 __ SmiUntag(src, dst);
3942 __ eor(src, src, Operand(0x80000000));
3943 }
3944 __ vmov(flt_scratch, src);
3945 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
3946 if (FLAG_inline_new) {
3947 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3948 __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
3949 __ Move(dst, r5);
3950 __ b(&done);
3951 }
3952
3953 // Slow case: Call the runtime system to do the number allocation.
3954 __ bind(&slow);
3955
3956 // TODO(3095996): Put a valid pointer value in the stack slot where the result
3957 // register is stored, as this register is in the pointer map, but contains an
3958 // integer value.
3959 __ mov(ip, Operand(0));
3960 __ StoreToSafepointRegisterSlot(ip, dst);
3961 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3962 __ Move(dst, r0);
3963
3964 // Done. Put the value in dbl_scratch into the value of the allocated heap
3965 // number.
3966 __ bind(&done);
3967 __ sub(ip, dst, Operand(kHeapObjectTag));
3968 __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
3969 __ StoreToSafepointRegisterSlot(dst, dst);
3970 }
3971
3972
3973 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3974 class DeferredNumberTagD: public LDeferredCode {
3975 public:
3976 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3977 : LDeferredCode(codegen), instr_(instr) { }
3978 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3979 virtual LInstruction* instr() { return instr_; }
3980 private:
3981 LNumberTagD* instr_;
3982 };
3983
3984 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3985 Register scratch = scratch0();
3986 Register reg = ToRegister(instr->result());
3987 Register temp1 = ToRegister(instr->TempAt(0));
3988 Register temp2 = ToRegister(instr->TempAt(1));
3989
3990 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
3991 if (FLAG_inline_new) {
3992 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
3993 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
3994 } else {
3995 __ jmp(deferred->entry());
3996 }
3997 __ bind(deferred->exit());
3998 __ sub(ip, reg, Operand(kHeapObjectTag));
3999 __ vstr(input_reg, ip, HeapNumber::kValueOffset);
4000 }
4001
4002
4003 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4004 // TODO(3095996): Get rid of this. For now, we need to make the
4005 // result register contain a valid pointer because it is already
4006 // contained in the register pointer map.
4007 Register reg = ToRegister(instr->result());
4008 __ mov(reg, Operand(0));
4009
4010 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4011 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4012 __ StoreToSafepointRegisterSlot(r0, reg);
4013 }
4014
4015
4016 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4017 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4018 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
4019 }
4020
4021
4022 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4023 Register input = ToRegister(instr->InputAt(0));
4024 Register result = ToRegister(instr->result());
4025 if (instr->needs_check()) {
4026 STATIC_ASSERT(kHeapObjectTag == 1);
4027 // If the input is a HeapObject, SmiUntag will set the carry flag.
4028 __ SmiUntag(result, input, SetCC);
4029 DeoptimizeIf(cs, instr->environment());
4030 } else {
4031 __ SmiUntag(result, input);
4032 }
4033 }
4034
4035
4036 void LCodeGen::EmitNumberUntagD(Register input_reg,
4037 DoubleRegister result_reg,
4038 bool deoptimize_on_undefined,
4039 bool deoptimize_on_minus_zero,
4040 LEnvironment* env) {
4041 Register scratch = scratch0();
4042 SwVfpRegister flt_scratch = double_scratch0().low();
4043 ASSERT(!result_reg.is(double_scratch0()));
4044
4045 Label load_smi, heap_number, done;
4046
4047 // Smi check.
4048 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4049
4050 // Heap number map check.
4051 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4052 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4053 __ cmp(scratch, Operand(ip));
4054 if (deoptimize_on_undefined) {
4055 DeoptimizeIf(ne, env);
4056 } else {
4057 Label heap_number;
4058 __ b(eq, &heap_number);
4059
4060 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4061 __ cmp(input_reg, Operand(ip));
4062 DeoptimizeIf(ne, env);
4063
4064 // Convert undefined to NaN.
4065 __ LoadRoot(ip, Heap::kNanValueRootIndex);
4066 __ sub(ip, ip, Operand(kHeapObjectTag));
4067 __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4068 __ jmp(&done);
4069
4070 __ bind(&heap_number);
4071 }
4072 // Heap number to double register conversion.
4073 __ sub(ip, input_reg, Operand(kHeapObjectTag));
4074 __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4075 if (deoptimize_on_minus_zero) {
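// -0 is the only double whose lower word is zero and whose upper word
// equals the sign mask.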
4076 __ vmov(ip, result_reg.low());
4077 __ cmp(ip, Operand(0));
4078 __ b(ne, &done);
4079 __ vmov(ip, result_reg.high());
4080 __ cmp(ip, Operand(HeapNumber::kSignMask));
4081 DeoptimizeIf(eq, env);
4082 }
4083 __ jmp(&done);
4084
4085 // Smi to double register conversion
4086 __ bind(&load_smi);
4087 // scratch: untagged value of input_reg
4088 __ vmov(flt_scratch, scratch);
4089 __ vcvt_f64_s32(result_reg, flt_scratch);
4090 __ bind(&done);
4091 }
4092
4093
4094 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4095 Register input_reg = ToRegister(instr->InputAt(0));
4096 Register scratch1 = scratch0();
4097 Register scratch2 = ToRegister(instr->TempAt(0));
4098 DwVfpRegister double_scratch = double_scratch0();
4099 SwVfpRegister single_scratch = double_scratch.low();
4100
4101 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4102 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4103
4104 Label done;
4105
4106 // The input was optimistically untagged; revert it.
4107 // The carry flag is set when we reach this deferred code as we just executed
4108 // SmiUntag(heap_object, SetCC)
4109 STATIC_ASSERT(kHeapObjectTag == 1);
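// adc computes input_reg * 2 + carry; the carry still holds the tag bit
// shifted out by SmiUntag, so this restores the original tagged value.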
4110 __ adc(input_reg, input_reg, Operand(input_reg));
4111
4112 // Heap number map check.
4113 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4114 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4115 __ cmp(scratch1, Operand(ip));
4116
4117 if (instr->truncating()) {
4118 Register scratch3 = ToRegister(instr->TempAt(1));
4119 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4120 ASSERT(!scratch3.is(input_reg) &&
4121 !scratch3.is(scratch1) &&
4122 !scratch3.is(scratch2));
4123 // Performs a truncating conversion of a floating point number as used by
4124 // the JS bitwise operations.
4125 Label heap_number;
4126 __ b(eq, &heap_number);
4127 // Check for undefined. Undefined is converted to zero for truncating
4128 // conversions.
4129 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4130 __ cmp(input_reg, Operand(ip));
4131 DeoptimizeIf(ne, instr->environment());
4132 __ mov(input_reg, Operand(0));
4133 __ b(&done);
4134
4135 __ bind(&heap_number);
4136 __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
4137 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
4138
4139 __ EmitECMATruncate(input_reg,
4140 double_scratch2,
4141 single_scratch,
4142 scratch1,
4143 scratch2,
4144 scratch3);
4145
4146 } else {
4147 CpuFeatures::Scope scope(VFP3);
4148 // Deoptimize if we don't have a heap number.
4149 DeoptimizeIf(ne, instr->environment());
4150
4151 __ sub(ip, input_reg, Operand(kHeapObjectTag));
4152 __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
4153 __ EmitVFPTruncate(kRoundToZero,
4154 single_scratch,
4155 double_scratch,
4156 scratch1,
4157 scratch2,
4158 kCheckForInexactConversion);
4159 DeoptimizeIf(ne, instr->environment());
4160 // Load the result.
4161 __ vmov(input_reg, single_scratch);
4162
4163 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4164 __ cmp(input_reg, Operand(0));
4165 __ b(ne, &done);
4166 __ vmov(scratch1, double_scratch.high());
4167 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4168 DeoptimizeIf(ne, instr->environment());
4169 }
4170 }
4171 __ bind(&done);
4172 }
4173
4174
4175 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4176 class DeferredTaggedToI: public LDeferredCode {
4177 public:
4178 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4179 : LDeferredCode(codegen), instr_(instr) { }
4180 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4181 virtual LInstruction* instr() { return instr_; }
4182 private:
4183 LTaggedToI* instr_;
4184 };
4185
4186 LOperand* input = instr->InputAt(0);
4187 ASSERT(input->IsRegister());
4188 ASSERT(input->Equals(instr->result()));
4189
4190 Register input_reg = ToRegister(input);
4191
4192 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
4193
4194 // Optimistically untag the input.
4195 // If the input is a HeapObject, SmiUntag will set the carry flag.
4196 __ SmiUntag(input_reg, SetCC);
4197 // Branch to deferred code if the input was tagged.
4198 // The deferred code will take care of restoring the tag.
4199 __ b(cs, deferred->entry());
4200 __ bind(deferred->exit());
4201 }
4202
4203
4204 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4205 LOperand* input = instr->InputAt(0);
4206 ASSERT(input->IsRegister());
4207 LOperand* result = instr->result();
4208 ASSERT(result->IsDoubleRegister());
4209
4210 Register input_reg = ToRegister(input);
4211 DoubleRegister result_reg = ToDoubleRegister(result);
4212
4213 EmitNumberUntagD(input_reg, result_reg,
4214 instr->hydrogen()->deoptimize_on_undefined(),
4215 instr->hydrogen()->deoptimize_on_minus_zero(),
4216 instr->environment());
4217 }
4218
4219
4220 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4221 Register result_reg = ToRegister(instr->result());
4222 Register scratch1 = scratch0();
4223 Register scratch2 = ToRegister(instr->TempAt(0));
4224 DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
4225 SwVfpRegister single_scratch = double_scratch0().low();
4226
4227 Label done;
4228
4229 if (instr->truncating()) {
4230 Register scratch3 = ToRegister(instr->TempAt(1));
4231 __ EmitECMATruncate(result_reg,
4232 double_input,
4233 single_scratch,
4234 scratch1,
4235 scratch2,
4236 scratch3);
4237 } else {
4238 VFPRoundingMode rounding_mode = kRoundToMinusInf;
4239 __ EmitVFPTruncate(rounding_mode,
4240 single_scratch,
4241 double_input,
4242 scratch1,
4243 scratch2,
4244 kCheckForInexactConversion);
4245 // Deoptimize if we had a vfp invalid exception,
4246 // including inexact operation.
4247 DeoptimizeIf(ne, instr->environment());
4248 // Retrieve the result.
4249 __ vmov(result_reg, single_scratch);
4250 }
4251 __ bind(&done);
4252 }
4253
4254
4255 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4256 LOperand* input = instr->InputAt(0);
4257 __ tst(ToRegister(input), Operand(kSmiTagMask));
4258 DeoptimizeIf(ne, instr->environment());
4259 }
4260
4261
4262 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4263 LOperand* input = instr->InputAt(0);
4264 __ tst(ToRegister(input), Operand(kSmiTagMask));
4265 DeoptimizeIf(eq, instr->environment());
4266 }
4267
4268
4269 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4270 Register input = ToRegister(instr->InputAt(0));
4271 Register scratch = scratch0();
4272
4273 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4274 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4275
4276 if (instr->hydrogen()->is_interval_check()) {
4277 InstanceType first;
4278 InstanceType last;
4279 instr->hydrogen()->GetCheckInterval(&first, &last);
4280
4281 __ cmp(scratch, Operand(first));
4282
4283 // If there is only one type in the interval check for equality.
4284 if (first == last) {
4285 DeoptimizeIf(ne, instr->environment());
4286 } else {
4287 DeoptimizeIf(lo, instr->environment());
4288 // Omit check for the last type.
4289 if (last != LAST_TYPE) {
4290 __ cmp(scratch, Operand(last));
4291 DeoptimizeIf(hi, instr->environment());
4292 }
4293 }
4294 } else {
4295 uint8_t mask;
4296 uint8_t tag;
4297 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4298
4299 if (IsPowerOf2(mask)) {
4300 ASSERT(tag == 0 || IsPowerOf2(tag));
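// For a single-bit mask, a zero tag requires the bit to be clear (deopt if
// ne), while a non-zero tag requires the bit to be set (deopt if eq).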
4301 __ tst(scratch, Operand(mask));
4302 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4303 } else {
4304 __ and_(scratch, scratch, Operand(mask));
4305 __ cmp(scratch, Operand(tag));
4306 DeoptimizeIf(ne, instr->environment());
4307 }
4308 }
4309 }
4310
4311
4312 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4313 Register reg = ToRegister(instr->value());
4314 Handle<JSFunction> target = instr->hydrogen()->target();
4315 if (isolate()->heap()->InNewSpace(*target)) {
4317 Handle<JSGlobalPropertyCell> cell =
4318 isolate()->factory()->NewJSGlobalPropertyCell(target);
4319 __ mov(ip, Operand(Handle<Object>(cell)));
4320 __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
4321 __ cmp(reg, ip);
4322 } else {
4323 __ cmp(reg, Operand(target));
4324 }
4325 DeoptimizeIf(ne, instr->environment());
4326 }
4327
4328
4329 void LCodeGen::DoCheckMapCommon(Register reg,
4330 Register scratch,
4331 Handle<Map> map,
4332 CompareMapMode mode,
4333 LEnvironment* env) {
4334 Label success;
4335 __ CompareMap(reg, scratch, map, &success, mode);
4336 DeoptimizeIf(ne, env);
4337 __ bind(&success);
4338 }
4339
4340
4341 void LCodeGen::DoCheckMap(LCheckMap* instr) {
4342 Register scratch = scratch0();
4343 LOperand* input = instr->InputAt(0);
4344 ASSERT(input->IsRegister());
4345 Register reg = ToRegister(input);
4346 Handle<Map> map = instr->hydrogen()->map();
4347 DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
4348 instr->environment());
4349 }
4350
4351
4352 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4353 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4354 Register result_reg = ToRegister(instr->result());
4355 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4356 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4357 }
4358
4359
4360 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4361 Register unclamped_reg = ToRegister(instr->unclamped());
4362 Register result_reg = ToRegister(instr->result());
4363 __ ClampUint8(result_reg, unclamped_reg);
4364 }
4365
4366
4367 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4368 Register scratch = scratch0();
4369 Register input_reg = ToRegister(instr->unclamped());
4370 Register result_reg = ToRegister(instr->result());
4371 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4372 Label is_smi, done, heap_number;
4373
4374 // Both smi and heap number cases are handled.
4375 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4376
4377 // Check for heap number
4378 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4379 __ cmp(scratch, Operand(factory()->heap_number_map()));
4380 __ b(eq, &heap_number);
4381
4382 // Check for undefined. Undefined is converted to zero for clamping
4383 // conversions.
4384 __ cmp(input_reg, Operand(factory()->undefined_value()));
4385 DeoptimizeIf(ne, instr->environment());
4386 __ mov(result_reg, Operand(0));
4387 __ jmp(&done);
4388
4389 // Heap number
4390 __ bind(&heap_number);
4391 __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4392 HeapNumber::kValueOffset));
4393 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4394 __ jmp(&done);
4395
4396 // smi
4397 __ bind(&is_smi);
4398 __ ClampUint8(result_reg, result_reg);
4399
4400 __ bind(&done);
4401 }
4402
4403
4404 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4405 Register temp1 = ToRegister(instr->TempAt(0));
4406 Register temp2 = ToRegister(instr->TempAt(1));
4407
4408 Handle<JSObject> holder = instr->holder();
4409 Handle<JSObject> current_prototype = instr->prototype();
4410
4411 // Load prototype object.
4412 __ LoadHeapObject(temp1, current_prototype);
4413
4414 // Check prototype maps up to the holder.
4415 while (!current_prototype.is_identical_to(holder)) {
4416 DoCheckMapCommon(temp1, temp2,
4417 Handle<Map>(current_prototype->map()),
4418 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4419 current_prototype =
4420 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4421 // Load next prototype object.
4422 __ LoadHeapObject(temp1, current_prototype);
4423 }
4424
4425 // Check the holder map.
4426 DoCheckMapCommon(temp1, temp2,
4427 Handle<Map>(current_prototype->map()),
4428 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4429 DeoptimizeIf(ne, instr->environment());
4430 }
4431
4432
4433 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4434 class DeferredAllocateObject: public LDeferredCode {
4435 public:
4436 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4437 : LDeferredCode(codegen), instr_(instr) { }
4438 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4439 virtual LInstruction* instr() { return instr_; }
4440 private:
4441 LAllocateObject* instr_;
4442 };
4443
4444 DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
4445
4446 Register result = ToRegister(instr->result());
4447 Register scratch = ToRegister(instr->TempAt(0));
4448 Register scratch2 = ToRegister(instr->TempAt(1));
4449 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4450 Handle<Map> initial_map(constructor->initial_map());
4451 int instance_size = initial_map->instance_size();
4452 ASSERT(initial_map->pre_allocated_property_fields() +
4453 initial_map->unused_property_fields() -
4454 initial_map->inobject_properties() == 0);
4455
4456 // Allocate memory for the object. The initial map might change when
4457 // the constructor's prototype changes, but instance size and property
4458 // counts remain unchanged (if slack tracking finished).
4459 ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4460 __ AllocateInNewSpace(instance_size,
4461 result,
4462 scratch,
4463 scratch2,
4464 deferred->entry(),
4465 TAG_OBJECT);
4466
4467 // Load the initial map.
4468 Register map = scratch;
4469 __ LoadHeapObject(map, constructor);
4470 __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4471
4472 // Initialize map and fields of the newly allocated object.
4473 ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4474 __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
4475 __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4476 __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4477 __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4478 if (initial_map->inobject_properties() != 0) {
4479 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4480 for (int i = 0; i < initial_map->inobject_properties(); i++) {
4481 int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4482 __ str(scratch, FieldMemOperand(result, property_offset));
4483 }
4484 }
4485
4486 __ bind(deferred->exit());
4487 }
4488
4489
4490 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4491 Register result = ToRegister(instr->result());
4492 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4493
4494 // TODO(3095996): Get rid of this. For now, we need to make the
4495 // result register contain a valid pointer because it is already
4496 // contained in the register pointer map.
4497 __ mov(result, Operand(0));
4498
4499 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4500 __ LoadHeapObject(r0, constructor);
4501 __ push(r0);
4502 CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
4503 __ StoreToSafepointRegisterSlot(r0, result);
4504 }
4505
4506
4507 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4508 Heap* heap = isolate()->heap();
4509 ElementsKind boilerplate_elements_kind =
4510 instr->hydrogen()->boilerplate_elements_kind();
4511
4512 // Deopt if the array literal boilerplate ElementsKind is of a type different
4513 // than the expected one. The check isn't necessary if the boilerplate has
4514 // already been converted to FAST_ELEMENTS.
4515 if (boilerplate_elements_kind != FAST_ELEMENTS) {
4516 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
4517 // Load map into r2.
4518 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
4519 // Load the map's "bit field 2".
4520 __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
4521 // Retrieve elements_kind from bit field 2.
4522 __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4523 __ cmp(r2, Operand(boilerplate_elements_kind));
4524 DeoptimizeIf(ne, instr->environment());
4525 }
4526
4527 __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4528 __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4529 __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4530 // Boilerplate already exists, constant elements are never accessed.
4531 // Pass an empty fixed array.
4532 __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4533 __ Push(r3, r2, r1);
4534
4535 // Pick the right runtime function or stub to call.
4536 int length = instr->hydrogen()->length();
4537 if (instr->hydrogen()->IsCopyOnWrite()) {
4538 ASSERT(instr->hydrogen()->depth() == 1);
4539 FastCloneShallowArrayStub::Mode mode =
4540 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4541 FastCloneShallowArrayStub stub(mode, length);
4542 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4543 } else if (instr->hydrogen()->depth() > 1) {
4544 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4545 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4546 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4547 } else {
4548 FastCloneShallowArrayStub::Mode mode =
4549 boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4550 ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4551 : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4552 FastCloneShallowArrayStub stub(mode, length);
4553 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4554 }
4555 }
4556
4557
4558 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4559 Register result,
4560 Register source,
4561 int* offset) {
4562 ASSERT(!source.is(r2));
4563 ASSERT(!result.is(r2));
4564
4565 // Only elements backing stores for non-COW arrays need to be copied.
4566 Handle<FixedArrayBase> elements(object->elements());
4567 bool has_elements = elements->length() > 0 &&
4568 elements->map() != isolate()->heap()->fixed_cow_array_map();
4569
4570 // Increase the offset so that subsequent objects end up right after
4571 // this object and its backing store.
4572 int object_offset = *offset;
4573 int object_size = object->map()->instance_size();
4574 int elements_offset = *offset + object_size;
4575 int elements_size = has_elements ? elements->Size() : 0;
4576 *offset += object_size + elements_size;
4577
4578 // Copy object header.
4579 ASSERT(object->properties()->length() == 0);
4580 int inobject_properties = object->map()->inobject_properties();
4581 int header_size = object_size - inobject_properties * kPointerSize;
4582 for (int i = 0; i < header_size; i += kPointerSize) {
4583 if (has_elements && i == JSObject::kElementsOffset) {
4584 __ add(r2, result, Operand(elements_offset));
4585 } else {
4586 __ ldr(r2, FieldMemOperand(source, i));
4587 }
4588 __ str(r2, FieldMemOperand(result, object_offset + i));
4589 }
4590
4591 // Copy in-object properties.
4592 for (int i = 0; i < inobject_properties; i++) {
4593 int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4594 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4595 if (value->IsJSObject()) {
4596 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4597 __ add(r2, result, Operand(*offset));
4598 __ str(r2, FieldMemOperand(result, total_offset));
4599 __ LoadHeapObject(source, value_object);
4600 EmitDeepCopy(value_object, result, source, offset);
4601 } else if (value->IsHeapObject()) {
4602 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4603 __ str(r2, FieldMemOperand(result, total_offset));
4604 } else {
4605 __ mov(r2, Operand(value));
4606 __ str(r2, FieldMemOperand(result, total_offset));
4607 }
4608 }
4609
4610 if (has_elements) {
4611 // Copy elements backing store header.
4612 __ LoadHeapObject(source, elements);
4613 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4614 __ ldr(r2, FieldMemOperand(source, i));
4615 __ str(r2, FieldMemOperand(result, elements_offset + i));
4616 }
4617
4618 // Copy elements backing store content.
4619 int elements_length = has_elements ? elements->length() : 0;
4620 if (elements->IsFixedDoubleArray()) {
4621 Handle<FixedDoubleArray> double_array =
4622 Handle<FixedDoubleArray>::cast(elements);
4623 for (int i = 0; i < elements_length; i++) {
4624 int64_t value = double_array->get_representation(i);
4625 // We only support little-endian mode, so the low word is stored first.
4626 int32_t value_low = value & 0xFFFFFFFF;
4627 int32_t value_high = value >> 32;
4628 int total_offset =
4629 elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4630 __ mov(r2, Operand(value_low));
4631 __ str(r2, FieldMemOperand(result, total_offset));
4632 __ mov(r2, Operand(value_high));
4633 __ str(r2, FieldMemOperand(result, total_offset + 4));
4634 }
4635 } else if (elements->IsFixedArray()) {
4636 for (int i = 0; i < elements_length; i++) {
4637 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4638 Handle<Object> value = JSObject::GetElement(object, i);
4639 if (value->IsJSObject()) {
4640 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4641 __ add(r2, result, Operand(*offset));
4642 __ str(r2, FieldMemOperand(result, total_offset));
4643 __ LoadHeapObject(source, value_object);
4644 EmitDeepCopy(value_object, result, source, offset);
4645 } else if (value->IsHeapObject()) {
4646 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4647 __ str(r2, FieldMemOperand(result, total_offset));
4648 } else {
4649 __ mov(r2, Operand(value));
4650 __ str(r2, FieldMemOperand(result, total_offset));
4651 }
4652 }
4653 } else {
4654 UNREACHABLE();
4655 }
4656 }
4657 }
4658
4659
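// Materializes a literal whose total size is known statically: the whole
// object graph is allocated with a single new-space allocation (with a
// runtime fallback) and then filled in from the boilerplate by
// EmitDeepCopy.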
4660 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4661 int size = instr->hydrogen()->total_size();
4662
4663 // Allocate all objects that are part of the literal in one big
4664 // allocation. This avoids multiple limit checks.
4665 Label allocated, runtime_allocate;
4666 __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
4667 __ jmp(&allocated);
4668
4669 __ bind(&runtime_allocate);
4670 __ mov(r0, Operand(Smi::FromInt(size)));
4671 __ push(r0);
4672 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4673
4674 __ bind(&allocated);
4675 int offset = 0;
4676 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4677 EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
4678 ASSERT_EQ(size, offset);
4679 }
4680
4681
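// Clones an object boilerplate. Shallow literals with fast elements and
// few properties go through FastCloneShallowObjectStub; everything else
// falls back to the kCreateObjectLiteral / kCreateObjectLiteralShallow
// runtime functions.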
4682 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4683 Handle<FixedArray> literals(instr->environment()->closure()->literals());
4684 Handle<FixedArray> constant_properties =
4685 instr->hydrogen()->constant_properties();
4686
4687 // Set up the parameters to the stub/runtime call.
4688 __ LoadHeapObject(r4, literals);
4689 __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4690 __ mov(r2, Operand(constant_properties));
4691 int flags = instr->hydrogen()->fast_elements()
4692 ? ObjectLiteral::kFastElements
4693 : ObjectLiteral::kNoFlags;
4694 __ mov(r1, Operand(Smi::FromInt(flags)));
4695 __ Push(r4, r3, r2, r1);
4696
4697 // Pick the right runtime function or stub to call.
4698 int properties_count = constant_properties->length() / 2;
4699 if (instr->hydrogen()->depth() > 1) {
4700 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4701 } else if (flags != ObjectLiteral::kFastElements ||
4702 properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4703 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4704 } else {
4705 FastCloneShallowObjectStub stub(properties_count);
4706 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4707 }
4708 }
4709
4710
4711 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4712 ASSERT(ToRegister(instr->InputAt(0)).is(r0));
4713 __ push(r0);
4714 CallRuntime(Runtime::kToFastProperties, 1, instr);
4715 }
4716
4717
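// Fetches the materialized regexp from the literals array, creating it
// via Runtime::kMaterializeRegExpLiteral if the slot still holds
// undefined, and then clones it into a freshly allocated JSRegExp.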
4718 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4719 Label materialized;
4720 // Registers will be used as follows:
4721 // r3 = JS function.
4722 // r7 = literals array.
4723 // r1 = regexp literal.
4724 // r0 = regexp literal clone.
4725 // r2 and r4-r6 are used as temporaries.
4726 __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4727 __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4728 int literal_offset = FixedArray::kHeaderSize +
4729 instr->hydrogen()->literal_index() * kPointerSize;
4730 __ ldr(r1, FieldMemOperand(r7, literal_offset));
4731 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4732 __ cmp(r1, ip);
4733 __ b(ne, &materialized);
4734
4735 // Create the regexp literal using a runtime function.
4736 // Result will be in r0.
4737 __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4738 __ mov(r5, Operand(instr->hydrogen()->pattern()));
4739 __ mov(r4, Operand(instr->hydrogen()->flags()));
4740 __ Push(r7, r6, r5, r4);
4741 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4742 __ mov(r1, r0);
4743
4744 __ bind(&materialized);
4745 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4746 Label allocated, runtime_allocate;
4747
4748 __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
4749 __ jmp(&allocated);
4750
4751 __ bind(&runtime_allocate);
4752 __ mov(r0, Operand(Smi::FromInt(size)));
4753 __ Push(r1, r0);
4754 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4755 __ pop(r1);
4756
4757 __ bind(&allocated);
4758 // Copy the content into the newly allocated memory.
4759 // (Unroll copy loop once for better throughput).
4760 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4761 __ ldr(r3, FieldMemOperand(r1, i));
4762 __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
4763 __ str(r3, FieldMemOperand(r0, i));
4764 __ str(r2, FieldMemOperand(r0, i + kPointerSize));
4765 }
4766 if ((size % (2 * kPointerSize)) != 0) {
4767 __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
4768 __ str(r3, FieldMemOperand(r0, size - kPointerSize));
4769 }
4770 }
4771
4772
4773 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4774 // Use the fast case closure allocation code that allocates in new
4775 // space for nested functions that don't need literals cloning.
4776 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4777 bool pretenure = instr->hydrogen()->pretenure();
4778 if (!pretenure && shared_info->num_literals() == 0) {
4779 FastNewClosureStub stub(shared_info->language_mode());
4780 __ mov(r1, Operand(shared_info));
4781 __ push(r1);
4782 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4783 } else {
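    // Slow path: call Runtime::kNewClosure with the context, the shared
    // function info and the pretenure flag.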
4784 __ mov(r2, Operand(shared_info));
4785 __ mov(r1, Operand(pretenure
4786 ? factory()->true_value()
4787 : factory()->false_value()));
4788 __ Push(cp, r2, r1);
4789 CallRuntime(Runtime::kNewClosure, 3, instr);
4790 }
4791 }
4792
4793
4794 void LCodeGen::DoTypeof(LTypeof* instr) {
4795 Register input = ToRegister(instr->InputAt(0));
4796 __ push(input);
4797 CallRuntime(Runtime::kTypeof, 1, instr);
4798 }
4799
4800
4801 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4802 Register input = ToRegister(instr->InputAt(0));
4803 int true_block = chunk_->LookupDestination(instr->true_block_id());
4804 int false_block = chunk_->LookupDestination(instr->false_block_id());
4805 Label* true_label = chunk_->GetAssemblyLabel(true_block);
4806 Label* false_label = chunk_->GetAssemblyLabel(false_block);
4807
4808 Condition final_branch_condition = EmitTypeofIs(true_label,
4809 false_label,
4810 input,
4811 instr->type_literal());
4812 if (final_branch_condition != kNoCondition) {
4813 EmitBranch(true_block, false_block, final_branch_condition);
4814 }
4815 }
4816
4817
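// Emits the comparison for a typeof check against the string literal
// |type_name|. Cases that can be decided early branch directly to
// |true_label| or |false_label|; the returned condition tells the caller
// when to branch to the true block, or kNoCondition if the literal can
// never match.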
4818 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4819 Label* false_label,
4820 Register input,
4821 Handle<String> type_name) {
4822 Condition final_branch_condition = kNoCondition;
4823 Register scratch = scratch0();
4824 if (type_name->Equals(heap()->number_symbol())) {
4825 __ JumpIfSmi(input, true_label);
4826 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
4827 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4828 __ cmp(input, Operand(ip));
4829 final_branch_condition = eq;
4830
4831 } else if (type_name->Equals(heap()->string_symbol())) {
4832 __ JumpIfSmi(input, false_label);
4833 __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
4834 __ b(ge, false_label);
4835 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4836 __ tst(ip, Operand(1 << Map::kIsUndetectable));
4837 final_branch_condition = eq;
4838
4839 } else if (type_name->Equals(heap()->boolean_symbol())) {
4840 __ CompareRoot(input, Heap::kTrueValueRootIndex);
4841 __ b(eq, true_label);
4842 __ CompareRoot(input, Heap::kFalseValueRootIndex);
4843 final_branch_condition = eq;
4844
4845 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4846 __ CompareRoot(input, Heap::kNullValueRootIndex);
4847 final_branch_condition = eq;
4848
4849 } else if (type_name->Equals(heap()->undefined_symbol())) {
4850 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
4851 __ b(eq, true_label);
4852 __ JumpIfSmi(input, false_label);
4853 // Check for undetectable objects => true.
4854 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
4855 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4856 __ tst(ip, Operand(1 << Map::kIsUndetectable));
4857 final_branch_condition = ne;
4858
4859 } else if (type_name->Equals(heap()->function_symbol())) {
4860 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
4861 __ JumpIfSmi(input, false_label);
4862 __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
4863 __ b(eq, true_label);
4864 __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
4865 final_branch_condition = eq;
4866
4867 } else if (type_name->Equals(heap()->object_symbol())) {
4868 __ JumpIfSmi(input, false_label);
4869 if (!FLAG_harmony_typeof) {
4870 __ CompareRoot(input, Heap::kNullValueRootIndex);
4871 __ b(eq, true_label);
4872 }
4873 __ CompareObjectType(input, input, scratch,
4874 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
4875 __ b(lt, false_label);
4876 __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
4877 __ b(gt, false_label);
4878 // Check for undetectable objects => false.
4879 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4880 __ tst(ip, Operand(1 << Map::kIsUndetectable));
4881 final_branch_condition = eq;
4882
4883 } else {
4884 __ b(false_label);
4885 }
4886
4887 return final_branch_condition;
4888 }
4889
4890
4891 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4892 Register temp1 = ToRegister(instr->TempAt(0));
4893 int true_block = chunk_->LookupDestination(instr->true_block_id());
4894 int false_block = chunk_->LookupDestination(instr->false_block_id());
4895
4896 EmitIsConstructCall(temp1, scratch0());
4897 EmitBranch(true_block, false_block, eq);
4898 }
4899
4900
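// Sets the condition flags to eq iff the current function was invoked as
// a constructor: load the caller's frame pointer, skip an arguments
// adaptor frame if present, and compare the frame marker against
// StackFrame::CONSTRUCT.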
4901 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4902 ASSERT(!temp1.is(temp2));
4903 // Get the frame pointer for the calling frame.
4904 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4905
4906 // Skip the arguments adaptor frame if it exists.
4907 Label check_frame_marker;
4908 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
4909 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4910 __ b(ne, &check_frame_marker);
4911 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
4912
4913 // Check the marker in the calling frame.
4914 __ bind(&check_frame_marker);
4915 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
4916 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4917 }
4918
4919
4920 void LCodeGen::EnsureSpaceForLazyDeopt() {
4921 // Ensure that we have enough space after the previous lazy-bailout
4922 // instruction for patching the code here.
4923 int current_pc = masm()->pc_offset();
4924 int patch_size = Deoptimizer::patch_size();
4925 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
4926 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
4927 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
4928 while (padding_size > 0) {
4929 __ nop();
4930 padding_size -= Assembler::kInstrSize;
4931 }
4932 }
4933 last_lazy_deopt_pc_ = masm()->pc_offset();
4934 }
4935
4936
4937 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
4938 EnsureSpaceForLazyDeopt();
4939 ASSERT(instr->HasEnvironment());
4940 LEnvironment* env = instr->environment();
4941 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4942 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4943 }
4944
4945
4946 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4947 DeoptimizeIf(al, instr->environment());
4948 }
4949
4950
4951 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4952 Register object = ToRegister(instr->object());
4953 Register key = ToRegister(instr->key());
4954 Register strict = scratch0();
4955 __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
4956 __ Push(object, key, strict);
4957 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4958 LPointerMap* pointers = instr->pointer_map();
4959 RecordPosition(pointers->position());
4960 SafepointGenerator safepoint_generator(
4961 this, pointers, Safepoint::kLazyDeopt);
4962 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
4963 }
4964
4965
4966 void LCodeGen::DoIn(LIn* instr) {
4967 Register obj = ToRegister(instr->object());
4968 Register key = ToRegister(instr->key());
4969 __ Push(key, obj);
4970 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4971 LPointerMap* pointers = instr->pointer_map();
4972 RecordPosition(pointers->position());
4973 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
4974 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
4975 }
4976
4977
4978 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4979 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4980 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4981 RecordSafepointWithLazyDeopt(
4982 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4983 ASSERT(instr->HasEnvironment());
4984 LEnvironment* env = instr->environment();
4985 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4986 }
4987
4988
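// Emits either a function-entry stack check, which calls StackCheckStub
// inline when sp is below the stack limit, or a backwards-branch check
// that jumps to deferred code calling Runtime::kStackGuard.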
4989 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4990 class DeferredStackCheck: public LDeferredCode {
4991 public:
4992 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4993 : LDeferredCode(codegen), instr_(instr) { }
4994 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4995 virtual LInstruction* instr() { return instr_; }
4996 private:
4997 LStackCheck* instr_;
4998 };
4999
5000 ASSERT(instr->HasEnvironment());
5001 LEnvironment* env = instr->environment();
5002 // There is no LLazyBailout instruction for stack-checks. We have to
5003 // prepare for lazy deoptimization explicitly here.
5004 if (instr->hydrogen()->is_function_entry()) {
5005 // Perform stack overflow check.
5006 Label done;
5007 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5008 __ cmp(sp, Operand(ip));
5009 __ b(hs, &done);
5010 StackCheckStub stub;
5011 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5012 EnsureSpaceForLazyDeopt();
5013 __ bind(&done);
5014 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5015 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5016 } else {
5017 ASSERT(instr->hydrogen()->is_backwards_branch());
5018 // Perform stack overflow check if this goto needs it before jumping.
5019 DeferredStackCheck* deferred_stack_check =
5020 new DeferredStackCheck(this, instr);
5021 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5022 __ cmp(sp, Operand(ip));
5023 __ b(lo, deferred_stack_check->entry());
5024 EnsureSpaceForLazyDeopt();
5025 __ bind(instr->done_label());
5026 deferred_stack_check->SetExit(instr->done_label());
5027 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5028 // Don't record a deoptimization index for the safepoint here.
5029 // This will be done explicitly when emitting call and the safepoint in
5030 // the deferred code.
5031 }
5032 }
5033
5034
5035 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5036 // This is a pseudo-instruction that ensures that the environment here is
5037 // properly registered for deoptimization and records the assembler's PC
5038 // offset.
5039 LEnvironment* environment = instr->environment();
5040 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5041 instr->SpilledDoubleRegisterArray());
5042
5043 // If the environment were already registered, we would have no way of
5044 // backpatching it with the spill slot operands.
5045 ASSERT(!environment->HasBeenRegistered());
5046 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5047 ASSERT(osr_pc_offset_ == -1);
5048 osr_pc_offset_ = masm()->pc_offset();
5049 }
5050
5051
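// Prepares the object in r0 for a for-in loop. Deoptimizes if it is
// undefined, null, a smi or a proxy; otherwise uses the object's map
// when the enum cache is valid, or calls Runtime::kGetPropertyNamesFast
// and deoptimizes unless the result is a map (checked via the meta map).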
5052 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5053 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5054 __ cmp(r0, ip);
5055 DeoptimizeIf(eq, instr->environment());
5056
5057 Register null_value = r5;
5058 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5059 __ cmp(r0, null_value);
5060 DeoptimizeIf(eq, instr->environment());
5061
5062 __ tst(r0, Operand(kSmiTagMask));
5063 DeoptimizeIf(eq, instr->environment());
5064
5065 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5066 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5067 DeoptimizeIf(le, instr->environment());
5068
5069 Label use_cache, call_runtime;
5070 __ CheckEnumCache(null_value, &call_runtime);
5071
5072 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5073 __ b(&use_cache);
5074
5075 // Get the set of properties to enumerate.
5076 __ bind(&call_runtime);
5077 __ push(r0);
5078 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5079
5080 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5081 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5082 __ cmp(r1, ip);
5083 DeoptimizeIf(ne, instr->environment());
5084 __ bind(&use_cache);
5085 }
5086
5087
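// Loads the enum cache bridge for |map|: instance descriptors ->
// enumeration index array -> entry at |idx|. Deoptimizes when that slot
// holds zero.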
5088 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5089 Register map = ToRegister(instr->map());
5090 Register result = ToRegister(instr->result());
5091 __ LoadInstanceDescriptors(map, result);
5092 __ ldr(result,
5093 FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
5094 __ ldr(result,
5095 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5096 __ cmp(result, Operand(0));
5097 DeoptimizeIf(eq, instr->environment());
5098 }
5099
5100
5101 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5102 Register object = ToRegister(instr->value());
5103 Register map = ToRegister(instr->map());
5104 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5105 __ cmp(map, scratch0());
5106 DeoptimizeIf(ne, instr->environment());
5107 }
5108
5109
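// Loads a field by smi-tagged index: non-negative indices address
// in-object fields relative to JSObject::kHeaderSize, negative indices
// address slots in the out-of-object properties array.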
5110 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5111 Register object = ToRegister(instr->object());
5112 Register index = ToRegister(instr->index());
5113 Register result = ToRegister(instr->result());
5114 Register scratch = scratch0();
5115
5116 Label out_of_object, done;
5117 __ cmp(index, Operand(0));
5118 __ b(lt, &out_of_object);
5119
5120 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5121 __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5122 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5123
5124 __ b(&done);
5125
5126 __ bind(&out_of_object);
5127 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5128 // Index is equal to the negated out-of-object property index plus 1.
5129 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5130 __ ldr(result, FieldMemOperand(scratch,
5131 FixedArray::kHeaderSize - kPointerSize));
5132 __ bind(&done);
5133 }
5134
5135
5136 #undef __
5137
5138 } } // namespace v8::internal
5139