// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else {
    lw(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    sw(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
  }
  Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}
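
// The check above relies on new space being one contiguous, size-aligned
// region, so masking an address yields the region start exactly for
// pointers inside it; a sketch of the predicate (names illustrative):
//   bool in_new_space = (addr & new_space_mask) == new_space_start;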


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASSERT(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    ASSERT(!dst.is(at));
    lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          dst,
          Operand(isolate()->factory()->meta_map()));
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  // TODO(mstarzinger): Dynamic counter missing.

  if (emit_debug_code()) {
    lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          map,
          Operand(at));
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
    li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    RAStatus ra_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASSERT(!AreAliased(object, address, value, t8));
  ASSERT(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    lw(at, MemOperand(address));
    Assert(
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  // TODO(mstarzinger): Dynamic counter missing.

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}
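
// Schematically, the barrier emitted above reduces to (a sketch):
//   if (value is a Smi) return;                        // Optional inline check.
//   if (!page(value)->pointers_to_here_interesting()) return;
//   if (!page(object)->pointers_from_here_interesting()) return;
//   RecordWriteStub(object, address, value);           // Slow path.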


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  lw(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sw(address, MemOperand(scratch));
  Addu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sw(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}
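
// In C-like terms, the fast path above appends 'address' to the store
// buffer and only calls out when the buffer is full (a sketch; the overflow
// condition is encoded as a bit in the buffer-top address itself):
//   *store_buffer_top++ = address;
//   if ((uintptr_t)store_buffer_top & kStoreBufferOverflowBit)
//     StoreBufferOverflowStub();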


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
      scratch, Operand(zero_reg));
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    // Restoring at is not needed; at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(scratch, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, scratch, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
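  // (2057 = 2^11 + 2^3 + 1, so the multiply reduces to two shifts and two
  // adds, which is what the four instructions below implement.)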
  sll(scratch, reg0, 11);
  sll(at, reg0, 3);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'elements'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  GetNumberHash(reg0, reg1);

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kNumberDictionaryProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}
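
// A sketch of the unrolled probing loop generated above (each entry is
// kEntrySize == 3 words: key, value, details):
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     int index = (hash + GetProbeOffset(i)) & capacity_mask;
//     if (elements[index * 3].key == key) goto found;  // Details checked next.
//   }
//   goto miss;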


// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kLoongson) {
      mult(rs, rt.rm());
      mflo(rd);
    } else {
      mul(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kLoongson) {
      mult(rs, at);
      mflo(rd);
    } else {
      mul(rd, rs, at);
    }
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant == kMips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}
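
// On pre-r2 variants the rotate above is synthesized from shifts; for a
// rotate amount n it computes, in C terms (a sketch; n is taken mod 32):
//   rd = (rs >> n) | (rs << (32 - n));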


void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  if (kArchVariant == kLoongson) {
    lw(zero_reg, rs);
  } else {
    pref(hint, rs);
  }
}


// ------------Pseudo-instructions-------------

void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  lwr(rd, rs);
  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}


void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  swr(rd, rs);
  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}
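
// Ulw/Usw lean on the MIPS lwl/lwr (swl/swr) pairs: each instruction of a
// pair transfers just the bytes of the word on its side of the alignment
// boundary, so together they access a potentially unaligned 32-bit value in
// two memory operations. The +3 offset addresses the most significant end
// of the word on a little-endian target.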


void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
  } else {
    ASSERT(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      li(dst, Operand(cell));
      lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      li(dst, Operand(value));
    }
  }
}


void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  ASSERT(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    } else {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
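
// In OPTIMIZE_SIZE mode li picks the shortest encoding for the immediate;
// schematically (a sketch, with kHiMask/kImm16Mask the high/low halfword
// masks):
//   is_int16(imm)           -> addiu rd, zero_reg, imm   (1 instruction)
//   (imm & 0xffff0000) == 0 -> ori   rd, zero_reg, imm   (1 instruction)
//   (imm & 0x0000ffff) == 0 -> lui   rd, imm >> 16       (1 instruction)
//   otherwise               -> lui + ori                 (2 instructions)
// Relocatable immediates always take the fixed lui/ori pair so the pair can
// later be patched to any other 32-bit value.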


void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}
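
// MultiPush walks the register list from the highest code down, so the
// highest-numbered register lands at the highest address (nearest the old
// stack top) and the lowest-numbered one at the new sp. MultiPushReversed
// below emits the opposite layout, and the MultiPop variants mirror them.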


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  RegList saved_regs = kJSCallerSaved | ra.bit();
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);

  // Save to a0 in case address == t0.
  Move(a0, address);
  PrepareCallCFunction(2, t0);

  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
}


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 33);

  if (kArchVariant == kMips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}
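
// Equivalently, the non-r2 path above computes (a sketch):
//   rt = ((uint32_t)rs << (32 - pos - size)) >> (32 - size);
// i.e. the zero-extended 'size'-bit field of rs starting at bit 'pos'.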


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size <= 32);
  ASSERT(size != 0);

  if (kArchVariant == kMips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    and_(t8, rs, at);
    sll(t8, t8, pos);
    sll(at, at, pos);
    nor(at, at, zero_reg);
    and_(at, rt, at);
    or_(rt, t8, at);
  }
}
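
// Without the r2 ins instruction, the insert above is built from masks; in
// C terms (a sketch):
//   uint32_t mask = (~0u >> (32 - size)) << pos;  // 'size' ones at 'pos'.
//   rt = (rt & ~mask) | ((rs << pos) & mask);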


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into 'scratch' as its double representation.
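  // (0x41E00000 is the high word of the IEEE-754 double 2^31: sign 0,
  // biased exponent 1023 + 31 = 0x41E, zero mantissa.)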
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    trunc_w_d(fd, fs);
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
  } else {
    trunc_w_d(fd, fs);
  }
}


void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    round_w_d(fd, fs);
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
  } else {
    round_w_d(fd, fs);
  }
}


void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    floor_w_d(fd, fs);
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
  } else {
    floor_w_d(fd, fs);
  }
}


void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    ceil_w_d(fd, fs);
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
  } else {
    ceil_w_d(fd, fs);
  }
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(at));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}
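
// In C terms the two paths above compute (a sketch):
//   uint32_t result = (d < 2147483648.0)
//       ? (uint32_t)(int32_t)trunc(d)                        // Fits in int32.
//       : ((uint32_t)(int32_t)trunc(d - 2147483648.0)) | 0x80000000u;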


void MacroAssembler::BranchF(Label* target,
                             Label* nan,
                             Condition cc,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cc == al) {
    Branch(bd, target);
    return;
  }

  ASSERT(nan || target);
  // Check for unordered (NaN) cases.
  if (nan) {
    c(UN, D, cmp1, cmp2);
    bc1t(nan);
  }

  if (target) {
    // Here NaN cases were either handled by this function or are assumed to
    // have been handled by the caller.
    // Unsigned conditions are treated as their signed counterpart.
    switch (cc) {
      case lt:
        c(OLT, D, cmp1, cmp2);
        bc1t(target);
        break;
      case gt:
        c(ULE, D, cmp1, cmp2);
        bc1f(target);
        break;
      case ge:
        c(ULT, D, cmp1, cmp2);
        bc1f(target);
        break;
      case le:
        c(OLE, D, cmp1, cmp2);
        bc1t(target);
        break;
      case eq:
        c(EQ, D, cmp1, cmp2);
        bc1t(target);
        break;
      case ueq:
        c(UEQ, D, cmp1, cmp2);
        bc1t(target);
        break;
      case ne:
        c(EQ, D, cmp1, cmp2);
        bc1f(target);
        break;
      case nue:
        c(UEQ, D, cmp1, cmp2);
        bc1f(target);
        break;
      default:
        CHECK(0);
    }
  }

  if (bd == PROTECT) {
    nop();
  }
}
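
// The switch above pairs the FPU compare c(cond, D, a, b), which sets the
// FCSR condition bit, with bc1t/bc1f, which branch on it. Conditions with
// no direct ordered compare are expressed by negating an unordered one:
// e.g. 'gt' becomes "not (unordered or less-or-equal)", i.e. c(ULE) +
// bc1f, so NaN operands never satisfy the ordered branches.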


void MacroAssembler::Move(FPURegister dst, double imm) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  bool force_load = dst.is(kDoubleRegZero);
  if (value_rep == zero && !force_load) {
    mov_d(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero && !force_load) {
    neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower of the corresponding FPU
    // register of FPU register pair.
    if (lo != 0) {
      li(at, Operand(lo));
      mtc1(at, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the higher of the corresponding FPU
    // register of FPU register pair.
    if (hi != 0) {
      li(at, Operand(hi));
      mtc1(at, dst.high());
    } else {
      mtc1(zero_reg, dst.high());
    }
  }
}


void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
  if (kArchVariant == kLoongson) {
    Label done;
    Branch(&done, ne, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movz(rd, rs, rt);
  }
}


void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
  if (kArchVariant == kLoongson) {
    Label done;
    Branch(&done, eq, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movn(rd, rs, rt);
  }
}


void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
  if (kArchVariant == kLoongson) {
    // Tests an FP condition code and then conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    ASSERT(cc == 0);
    ASSERT(!(rs.is(t8) || rd.is(t8)));
    Label done;
    Register scratch = t8;
    // For testing purposes we need to fetch the contents of the FCSR register
    // and then test its cc (floating point condition code) bit (for cc = 0
    // this is bit 23 of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
    nop();
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, eq, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movt(rd, rs, cc);
  }
}


void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
  if (kArchVariant == kLoongson) {
    // Tests an FP condition code and then conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    ASSERT(cc == 0);
    ASSERT(!(rs.is(t8) || rd.is(t8)));
    Label done;
    Register scratch = t8;
    // For testing purposes we need to fetch the contents of the FCSR register
    // and then test its cc (floating point condition code) bit (for cc = 0
    // this is bit 23 of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
    nop();
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, ne, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movf(rd, rs, cc);
  }
}


void MacroAssembler::Clz(Register rd, Register rs) {
  if (kArchVariant == kLoongson) {
    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
    Register mask = t8;
    Register scratch = t9;
    Label loop, end;
    mov(at, rs);
    mov(rd, zero_reg);
    lui(mask, 0x8000);
    bind(&loop);
    and_(scratch, at, mask);
    Branch(&end, ne, scratch, Operand(zero_reg));
    addiu(rd, rd, 1);
    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
    srl(mask, mask, 1);
    bind(&end);
  } else {
    clz(rd, rs);
  }
}


void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                     Register result,
                                     DoubleRegister double_input,
                                     Register scratch,
                                     DoubleRegister double_scratch,
                                     Register except_flag,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(!result.is(scratch));
  ASSERT(!double_input.is(double_scratch));
  ASSERT(!except_flag.is(scratch));

  Label done;

  // Clear the except flag (0 = no exception)
  mov(except_flag, zero_reg);

  // Test for values that can be exactly represented as a signed 32-bit integer.
  cvt_w_d(double_scratch, double_input);
  mfc1(result, double_scratch);
  cvt_d_w(double_scratch, double_scratch);
  BranchF(&done, NULL, eq, double_input, double_scratch);

  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.

  if (check_inexact == kDontCheckForInexactConversion) {
    // Ignore inexact exceptions.
    except_mask &= ~kFCSRInexactFlagMask;
  }

  // Save FCSR.
  cfc1(scratch, FCSR);
  // Disable FPU exceptions.
  ctc1(zero_reg, FCSR);

  // Do operation based on rounding mode.
  switch (rounding_mode) {
    case kRoundToNearest:
      Round_w_d(double_scratch, double_input);
      break;
    case kRoundToZero:
      Trunc_w_d(double_scratch, double_input);
      break;
    case kRoundToPlusInf:
      Ceil_w_d(double_scratch, double_input);
      break;
    case kRoundToMinusInf:
      Floor_w_d(double_scratch, double_input);
      break;
  }  // End of switch-statement.

  // Retrieve FCSR.
  cfc1(except_flag, FCSR);
  // Restore FCSR.
  ctc1(scratch, FCSR);
  // Move the converted value into the result register.
  mfc1(result, double_scratch);

  // Check for fpu exceptions.
  And(except_flag, except_flag, Operand(except_mask));

  bind(&done);
}
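
// The initial cvt_w_d/cvt_d_w round trip above is the cheap exactness test:
// a double that converts to int32 and back unchanged is an integer in int32
// range, so the FCSR bookkeeping on the slow path can be skipped entirely.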


void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister single_scratch = kLithiumScratchDouble.low();
  Register scratch = at;
  Register scratch2 = t9;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(done, eq, scratch, Operand(zero_reg));
}


void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through, the inline version didn't succeed; call the stub.
  push(ra);
  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  sdc1(double_input, MemOperand(sp, 0));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  Addu(sp, sp, Operand(kDoubleSize));
  pop(ra);

  bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = f12;
  ASSERT(!result.is(object));

  ldc1(double_scratch,
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through, the inline version didn't succeed; call the stub.
  push(ra);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,
                     true);
  CallStub(&stub);
  pop(ra);

  bind(&done);
}


void MacroAssembler::TruncateNumberToI(Register object,
                                       Register result,
                                       Register heap_number_map,
                                       Register scratch,
                                       Label* not_number) {
  Label done;
  ASSERT(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}


// Emulated conditional branches do not emit a nop in the branch delay slot.
1593 //
1594 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1595 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
1596     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
1597     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1598 
1599 
Branch(int16_t offset,BranchDelaySlot bdslot)1600 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1601   BranchShort(offset, bdslot);
1602 }
1603 
1604 
Branch(int16_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)1605 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1606                             const Operand& rt,
1607                             BranchDelaySlot bdslot) {
1608   BranchShort(offset, cond, rs, rt, bdslot);
1609 }
1610 
1611 
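// For a bound label within short-branch range a 16-bit branch suffices.
// Otherwise, or for an unbound label once a trampoline pool has been emitted,
// fall back to an absolute jump via Jr.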
1612 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1613   if (L->is_bound()) {
1614     if (is_near(L)) {
1615       BranchShort(L, bdslot);
1616     } else {
1617       Jr(L, bdslot);
1618     }
1619   } else {
1620     if (is_trampoline_emitted()) {
1621       Jr(L, bdslot);
1622     } else {
1623       BranchShort(L, bdslot);
1624     }
1625   }
1626 }
1627 
1628 
1629 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1630                             const Operand& rt,
1631                             BranchDelaySlot bdslot) {
1632   if (L->is_bound()) {
1633     if (is_near(L)) {
1634       BranchShort(L, cond, rs, rt, bdslot);
1635     } else {
1636       if (cond != cc_always) {
1637         Label skip;
1638         Condition neg_cond = NegateCondition(cond);
1639         BranchShort(&skip, neg_cond, rs, rt);
1640         Jr(L, bdslot);
1641         bind(&skip);
1642       } else {
1643         Jr(L, bdslot);
1644       }
1645     }
1646   } else {
1647     if (is_trampoline_emitted()) {
1648       if (cond != cc_always) {
1649         Label skip;
1650         Condition neg_cond = NegateCondition(cond);
1651         BranchShort(&skip, neg_cond, rs, rt);
1652         Jr(L, bdslot);
1653         bind(&skip);
1654       } else {
1655         Jr(L, bdslot);
1656       }
1657     } else {
1658       BranchShort(L, cond, rs, rt, bdslot);
1659     }
1660   }
1661 }
1662 
1663 
1664 void MacroAssembler::Branch(Label* L,
1665                             Condition cond,
1666                             Register rs,
1667                             Heap::RootListIndex index,
1668                             BranchDelaySlot bdslot) {
1669   LoadRoot(at, index);
1670   Branch(L, cond, rs, Operand(at), bdslot);
1671 }
1672 
1673 
1674 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1675   b(offset);
1676 
1677   // Emit a nop in the branch delay slot if required.
1678   if (bdslot == PROTECT)
1679     nop();
1680 }
1681 
1682 
1683 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1684                                  const Operand& rt,
1685                                  BranchDelaySlot bdslot) {
1686   BRANCH_ARGS_CHECK(cond, rs, rt);
1687   ASSERT(!rs.is(zero_reg));
1688   Register r2 = no_reg;
1689   Register scratch = at;
1690 
1691   if (rt.is_reg()) {
1692     // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1693     // rt.
1694     BlockTrampolinePoolScope block_trampoline_pool(this);
1695     r2 = rt.rm_;
1696     switch (cond) {
1697       case cc_always:
1698         b(offset);
1699         break;
1700       case eq:
1701         beq(rs, r2, offset);
1702         break;
1703       case ne:
1704         bne(rs, r2, offset);
1705         break;
1706       // Signed comparison.
1707       case greater:
1708         if (r2.is(zero_reg)) {
1709           bgtz(rs, offset);
1710         } else {
1711           slt(scratch, r2, rs);
1712           bne(scratch, zero_reg, offset);
1713         }
1714         break;
1715       case greater_equal:
1716         if (r2.is(zero_reg)) {
1717           bgez(rs, offset);
1718         } else {
1719           slt(scratch, rs, r2);
1720           beq(scratch, zero_reg, offset);
1721         }
1722         break;
1723       case less:
1724         if (r2.is(zero_reg)) {
1725           bltz(rs, offset);
1726         } else {
1727           slt(scratch, rs, r2);
1728           bne(scratch, zero_reg, offset);
1729         }
1730         break;
1731       case less_equal:
1732         if (r2.is(zero_reg)) {
1733           blez(rs, offset);
1734         } else {
1735           slt(scratch, r2, rs);
1736           beq(scratch, zero_reg, offset);
1737         }
1738         break;
1739       // Unsigned comparison.
1740       case Ugreater:
1741         if (r2.is(zero_reg)) {
1742           bne(rs, zero_reg, offset);  // Unsigned rs > 0 iff rs != 0.
1743         } else {
1744           sltu(scratch, r2, rs);
1745           bne(scratch, zero_reg, offset);
1746         }
1747         break;
1748       case Ugreater_equal:
1749         if (r2.is(zero_reg)) {
1750           b(offset);  // Unsigned rs >= 0 is always true.
1751         } else {
1752           sltu(scratch, rs, r2);
1753           beq(scratch, zero_reg, offset);
1754         }
1755         break;
1756       case Uless:
1757         if (r2.is(zero_reg)) {
1758           // No code needs to be emitted: unsigned rs < 0 is never true.
1759           return;
1760         } else {
1761           sltu(scratch, rs, r2);
1762           bne(scratch, zero_reg, offset);
1763         }
1764         break;
1765       case Uless_equal:
1766         if (r2.is(zero_reg)) {
1767           beq(rs, zero_reg, offset);  // Unsigned rs <= 0 iff rs == 0.
1768         } else {
1769           sltu(scratch, r2, rs);
1770           beq(scratch, zero_reg, offset);
1771         }
1772         break;
1773       default:
1774         UNREACHABLE();
1775     }
1776   } else {
1777     // Be careful to always use shifted_branch_offset only just before the
1778     // branch instruction, as the location will be remembered for patching the
1779     // target.
1780     BlockTrampolinePoolScope block_trampoline_pool(this);
1781     switch (cond) {
1782       case cc_always:
1783         b(offset);
1784         break;
1785       case eq:
1786         // We don't want any other register but scratch clobbered.
1787         ASSERT(!scratch.is(rs));
1788         r2 = scratch;
1789         li(r2, rt);
1790         beq(rs, r2, offset);
1791         break;
1792       case ne:
1793         // We don't want any other register but scratch clobbered.
1794         ASSERT(!scratch.is(rs));
1795         r2 = scratch;
1796         li(r2, rt);
1797         bne(rs, r2, offset);
1798         break;
1799       // Signed comparison.
1800       case greater:
1801         if (rt.imm32_ == 0) {
1802           bgtz(rs, offset);
1803         } else {
1804           r2 = scratch;
1805           li(r2, rt);
1806           slt(scratch, r2, rs);
1807           bne(scratch, zero_reg, offset);
1808         }
1809         break;
1810       case greater_equal:
1811         if (rt.imm32_ == 0) {
1812           bgez(rs, offset);
1813         } else if (is_int16(rt.imm32_)) {
1814           slti(scratch, rs, rt.imm32_);
1815           beq(scratch, zero_reg, offset);
1816         } else {
1817           r2 = scratch;
1818           li(r2, rt);
1819           slt(scratch, rs, r2);
1820           beq(scratch, zero_reg, offset);
1821         }
1822         break;
1823       case less:
1824         if (rt.imm32_ == 0) {
1825           bltz(rs, offset);
1826         } else if (is_int16(rt.imm32_)) {
1827           slti(scratch, rs, rt.imm32_);
1828           bne(scratch, zero_reg, offset);
1829         } else {
1830           r2 = scratch;
1831           li(r2, rt);
1832           slt(scratch, rs, r2);
1833           bne(scratch, zero_reg, offset);
1834         }
1835         break;
1836       case less_equal:
1837         if (rt.imm32_ == 0) {
1838           blez(rs, offset);
1839         } else {
1840           r2 = scratch;
1841           li(r2, rt);
1842           slt(scratch, r2, rs);
1843           beq(scratch, zero_reg, offset);
1844         }
1845         break;
1846       // Unsigned comparison.
1847       case Ugreater:
1848         if (rt.imm32_ == 0) {
1849           bne(rs, zero_reg, offset);  // Unsigned rs > 0 iff rs != 0.
1850         } else {
1851           r2 = scratch;
1852           li(r2, rt);
1853           sltu(scratch, r2, rs);
1854           bne(scratch, zero_reg, offset);
1855         }
1856         break;
1857       case Ugreater_equal:
1858         if (rt.imm32_ == 0) {
1859           b(offset);  // Unsigned rs >= 0 is always true.
1860         } else if (is_int16(rt.imm32_)) {
1861           sltiu(scratch, rs, rt.imm32_);
1862           beq(scratch, zero_reg, offset);
1863         } else {
1864           r2 = scratch;
1865           li(r2, rt);
1866           sltu(scratch, rs, r2);
1867           beq(scratch, zero_reg, offset);
1868         }
1869         break;
1870       case Uless:
1871         if (rt.imm32_ == 0) {
1872           // No code needs to be emitted: unsigned rs < 0 is never true.
1873           return;
1874         } else if (is_int16(rt.imm32_)) {
1875           sltiu(scratch, rs, rt.imm32_);
1876           bne(scratch, zero_reg, offset);
1877         } else {
1878           r2 = scratch;
1879           li(r2, rt);
1880           sltu(scratch, rs, r2);
1881           bne(scratch, zero_reg, offset);
1882         }
1883         break;
1884       case Uless_equal:
1885         if (rt.imm32_ == 0) {
1886           beq(rs, zero_reg, offset);  // Unsigned rs <= 0 iff rs == 0.
1887         } else {
1888           r2 = scratch;
1889           li(r2, rt);
1890           sltu(scratch, r2, rs);
1891           beq(scratch, zero_reg, offset);
1892         }
1893         break;
1894       default:
1895         UNREACHABLE();
1896     }
1897   }
1898   // Emit a nop in the branch delay slot if required.
1899   if (bdslot == PROTECT)
1900     nop();
1901 }
1902 
1903 
1904 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1905   // We use shifted_branch_offset as an argument for the branch instruction to
1906   // be sure it is evaluated just before generating the branch, as needed.
1907 
1908   b(shifted_branch_offset(L, false));
1909 
1910   // Emit a nop in the branch delay slot if required.
1911   if (bdslot == PROTECT)
1912     nop();
1913 }
1914 
1915 
1916 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1917                                  const Operand& rt,
1918                                  BranchDelaySlot bdslot) {
1919   BRANCH_ARGS_CHECK(cond, rs, rt);
1920 
1921   int32_t offset = 0;
1922   Register r2 = no_reg;
1923   Register scratch = at;
1924   if (rt.is_reg()) {
1925     BlockTrampolinePoolScope block_trampoline_pool(this);
1926     r2 = rt.rm_;
1927     // Be careful to always use shifted_branch_offset only just before the
1928     // branch instruction, as the location will be remembered for patching the
1929     // target.
1930     switch (cond) {
1931       case cc_always:
1932         offset = shifted_branch_offset(L, false);
1933         b(offset);
1934         break;
1935       case eq:
1936         offset = shifted_branch_offset(L, false);
1937         beq(rs, r2, offset);
1938         break;
1939       case ne:
1940         offset = shifted_branch_offset(L, false);
1941         bne(rs, r2, offset);
1942         break;
1943       // Signed comparison.
1944       case greater:
1945         if (r2.is(zero_reg)) {
1946           offset = shifted_branch_offset(L, false);
1947           bgtz(rs, offset);
1948         } else {
1949           slt(scratch, r2, rs);
1950           offset = shifted_branch_offset(L, false);
1951           bne(scratch, zero_reg, offset);
1952         }
1953         break;
1954       case greater_equal:
1955         if (r2.is(zero_reg)) {
1956           offset = shifted_branch_offset(L, false);
1957           bgez(rs, offset);
1958         } else {
1959           slt(scratch, rs, r2);
1960           offset = shifted_branch_offset(L, false);
1961           beq(scratch, zero_reg, offset);
1962         }
1963         break;
1964       case less:
1965         if (r2.is(zero_reg)) {
1966           offset = shifted_branch_offset(L, false);
1967           bltz(rs, offset);
1968         } else {
1969           slt(scratch, rs, r2);
1970           offset = shifted_branch_offset(L, false);
1971           bne(scratch, zero_reg, offset);
1972         }
1973         break;
1974       case less_equal:
1975         if (r2.is(zero_reg)) {
1976           offset = shifted_branch_offset(L, false);
1977           blez(rs, offset);
1978         } else {
1979           slt(scratch, r2, rs);
1980           offset = shifted_branch_offset(L, false);
1981           beq(scratch, zero_reg, offset);
1982         }
1983         break;
1984       // Unsigned comparison.
1985       case Ugreater:
1986         if (r2.is(zero_reg)) {
1987           offset = shifted_branch_offset(L, false);
1988           bne(rs, zero_reg, offset);  // Unsigned rs > 0 iff rs != 0.
1989         } else {
1990           sltu(scratch, r2, rs);
1991           offset = shifted_branch_offset(L, false);
1992           bne(scratch, zero_reg, offset);
1993         }
1994         break;
1995       case Ugreater_equal:
1996         if (r2.is(zero_reg)) {
1997           offset = shifted_branch_offset(L, false);
1998           b(offset);  // Unsigned rs >= 0 is always true.
1999         } else {
2000           sltu(scratch, rs, r2);
2001           offset = shifted_branch_offset(L, false);
2002           beq(scratch, zero_reg, offset);
2003         }
2004         break;
2005       case Uless:
2006         if (r2.is(zero_reg)) {
2007           // No code needs to be emitted: unsigned rs < 0 is never true.
2008           return;
2009         } else {
2010           sltu(scratch, rs, r2);
2011           offset = shifted_branch_offset(L, false);
2012           bne(scratch, zero_reg, offset);
2013         }
2014         break;
2015       case Uless_equal:
2016         if (r2.is(zero_reg)) {
2017           offset = shifted_branch_offset(L, false);
2018           beq(rs, zero_reg, offset);  // Unsigned rs <= 0 iff rs == 0.
2019         } else {
2020           sltu(scratch, r2, rs);
2021           offset = shifted_branch_offset(L, false);
2022           beq(scratch, zero_reg, offset);
2023         }
2024         break;
2025       default:
2026         UNREACHABLE();
2027     }
2028   } else {
2029     // Be careful to always use shifted_branch_offset only just before the
2030     // branch instruction, as the location will be remembered for patching the
2031     // target.
2032     BlockTrampolinePoolScope block_trampoline_pool(this);
2033     switch (cond) {
2034       case cc_always:
2035         offset = shifted_branch_offset(L, false);
2036         b(offset);
2037         break;
2038       case eq:
2039         ASSERT(!scratch.is(rs));
2040         r2 = scratch;
2041         li(r2, rt);
2042         offset = shifted_branch_offset(L, false);
2043         beq(rs, r2, offset);
2044         break;
2045       case ne:
2046         ASSERT(!scratch.is(rs));
2047         r2 = scratch;
2048         li(r2, rt);
2049         offset = shifted_branch_offset(L, false);
2050         bne(rs, r2, offset);
2051         break;
2052       // Signed comparison.
2053       case greater:
2054         if (rt.imm32_ == 0) {
2055           offset = shifted_branch_offset(L, false);
2056           bgtz(rs, offset);
2057         } else {
2058           ASSERT(!scratch.is(rs));
2059           r2 = scratch;
2060           li(r2, rt);
2061           slt(scratch, r2, rs);
2062           offset = shifted_branch_offset(L, false);
2063           bne(scratch, zero_reg, offset);
2064         }
2065         break;
2066       case greater_equal:
2067         if (rt.imm32_ == 0) {
2068           offset = shifted_branch_offset(L, false);
2069           bgez(rs, offset);
2070         } else if (is_int16(rt.imm32_)) {
2071           slti(scratch, rs, rt.imm32_);
2072           offset = shifted_branch_offset(L, false);
2073           beq(scratch, zero_reg, offset);
2074         } else {
2075           ASSERT(!scratch.is(rs));
2076           r2 = scratch;
2077           li(r2, rt);
2078           slt(scratch, rs, r2);
2079           offset = shifted_branch_offset(L, false);
2080           beq(scratch, zero_reg, offset);
2081         }
2082         break;
2083       case less:
2084         if (rt.imm32_ == 0) {
2085           offset = shifted_branch_offset(L, false);
2086           bltz(rs, offset);
2087         } else if (is_int16(rt.imm32_)) {
2088           slti(scratch, rs, rt.imm32_);
2089           offset = shifted_branch_offset(L, false);
2090           bne(scratch, zero_reg, offset);
2091         } else {
2092           ASSERT(!scratch.is(rs));
2093           r2 = scratch;
2094           li(r2, rt);
2095           slt(scratch, rs, r2);
2096           offset = shifted_branch_offset(L, false);
2097           bne(scratch, zero_reg, offset);
2098         }
2099         break;
2100       case less_equal:
2101         if (rt.imm32_ == 0) {
2102           offset = shifted_branch_offset(L, false);
2103           blez(rs, offset);
2104         } else {
2105           ASSERT(!scratch.is(rs));
2106           r2 = scratch;
2107           li(r2, rt);
2108           slt(scratch, r2, rs);
2109           offset = shifted_branch_offset(L, false);
2110           beq(scratch, zero_reg, offset);
2111         }
2112         break;
2113       // Unsigned comparison.
2114       case Ugreater:
2115         if (rt.imm32_ == 0) {
2116           offset = shifted_branch_offset(L, false);
2117           bne(rs, zero_reg, offset);
2118         } else {
2119           ASSERT(!scratch.is(rs));
2120           r2 = scratch;
2121           li(r2, rt);
2122           sltu(scratch, r2, rs);
2123           offset = shifted_branch_offset(L, false);
2124           bne(scratch, zero_reg, offset);
2125         }
2126         break;
2127       case Ugreater_equal:
2128         if (rt.imm32_ == 0) {
2129           offset = shifted_branch_offset(L, false);
2130           b(offset);  // Unsigned rs >= 0 is always true.
2131         } else if (is_int16(rt.imm32_)) {
2132           sltiu(scratch, rs, rt.imm32_);
2133           offset = shifted_branch_offset(L, false);
2134           beq(scratch, zero_reg, offset);
2135         } else {
2136           ASSERT(!scratch.is(rs));
2137           r2 = scratch;
2138           li(r2, rt);
2139           sltu(scratch, rs, r2);
2140           offset = shifted_branch_offset(L, false);
2141           beq(scratch, zero_reg, offset);
2142         }
2143         break;
2144       case Uless:
2145         if (rt.imm32_ == 0) {
2146           // No code needs to be emitted: unsigned rs < 0 is never true.
2147           return;
2148         } else if (is_int16(rt.imm32_)) {
2149           sltiu(scratch, rs, rt.imm32_);
2150           offset = shifted_branch_offset(L, false);
2151           bne(scratch, zero_reg, offset);
2152         } else {
2153           ASSERT(!scratch.is(rs));
2154           r2 = scratch;
2155           li(r2, rt);
2156           sltu(scratch, rs, r2);
2157           offset = shifted_branch_offset(L, false);
2158           bne(scratch, zero_reg, offset);
2159         }
2160         break;
2161       case Uless_equal:
2162         if (rt.imm32_ == 0) {
2163           offset = shifted_branch_offset(L, false);
2164           beq(rs, zero_reg, offset);
2165         } else {
2166           ASSERT(!scratch.is(rs));
2167           r2 = scratch;
2168           li(r2, rt);
2169           sltu(scratch, r2, rs);
2170           offset = shifted_branch_offset(L, false);
2171           beq(scratch, zero_reg, offset);
2172         }
2173         break;
2174       default:
2175         UNREACHABLE();
2176     }
2177   }
2178   // Check that the offset actually fits in an int16_t.
2179   ASSERT(is_int16(offset));
2180   // Emit a nop in the branch delay slot if required.
2181   if (bdslot == PROTECT)
2182     nop();
2183 }
2184 
2185 
2186 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2187   BranchAndLinkShort(offset, bdslot);
2188 }
2189 
2190 
2191 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2192                                    const Operand& rt,
2193                                    BranchDelaySlot bdslot) {
2194   BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2195 }
2196 
2197 
2198 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2199   if (L->is_bound()) {
2200     if (is_near(L)) {
2201       BranchAndLinkShort(L, bdslot);
2202     } else {
2203       Jalr(L, bdslot);
2204     }
2205   } else {
2206     if (is_trampoline_emitted()) {
2207       Jalr(L, bdslot);
2208     } else {
2209       BranchAndLinkShort(L, bdslot);
2210     }
2211   }
2212 }
2213 
2214 
2215 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2216                                    const Operand& rt,
2217                                    BranchDelaySlot bdslot) {
2218   if (L->is_bound()) {
2219     if (is_near(L)) {
2220       BranchAndLinkShort(L, cond, rs, rt, bdslot);
2221     } else {
2222       Label skip;
2223       Condition neg_cond = NegateCondition(cond);
2224       BranchShort(&skip, neg_cond, rs, rt);
2225       Jalr(L, bdslot);
2226       bind(&skip);
2227     }
2228   } else {
2229     if (is_trampoline_emitted()) {
2230       Label skip;
2231       Condition neg_cond = NegateCondition(cond);
2232       BranchShort(&skip, neg_cond, rs, rt);
2233       Jalr(L, bdslot);
2234       bind(&skip);
2235     } else {
2236       BranchAndLinkShort(L, cond, rs, rt, bdslot);
2237     }
2238   }
2239 }
2240 
2241 
2242 // We need to use a bgezal or bltzal, but they can't be used directly with the
2243 // slt instructions. We could use sub or add instead but we would miss overflow
2244 // cases, so we keep slt and add an intermediate third instruction.
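// For example, for 'greater' we emit slt(scratch, r2, rs), which leaves
// scratch == 1 when the condition holds and 0 otherwise; addiu(scratch, -1)
// maps that to 0 or -1, so bgezal links and branches exactly when r2 < rs.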
2245 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2246                                         BranchDelaySlot bdslot) {
2247   bal(offset);
2248 
2249   // Emit a nop in the branch delay slot if required.
2250   if (bdslot == PROTECT)
2251     nop();
2252 }
2253 
2254 
2255 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2256                                         Register rs, const Operand& rt,
2257                                         BranchDelaySlot bdslot) {
2258   BRANCH_ARGS_CHECK(cond, rs, rt);
2259   Register r2 = no_reg;
2260   Register scratch = at;
2261 
2262   if (rt.is_reg()) {
2263     r2 = rt.rm_;
2264   } else if (cond != cc_always) {
2265     r2 = scratch;
2266     li(r2, rt);
2267   }
2268 
2269   {
2270     BlockTrampolinePoolScope block_trampoline_pool(this);
2271     switch (cond) {
2272       case cc_always:
2273         bal(offset);
2274         break;
2275       case eq:
2276         bne(rs, r2, 2);
2277         nop();
2278         bal(offset);
2279         break;
2280       case ne:
2281         beq(rs, r2, 2);
2282         nop();
2283         bal(offset);
2284         break;
2285 
2286       // Signed comparison.
2287       case greater:
2288         slt(scratch, r2, rs);
2289         addiu(scratch, scratch, -1);
2290         bgezal(scratch, offset);
2291         break;
2292       case greater_equal:
2293         slt(scratch, rs, r2);
2294         addiu(scratch, scratch, -1);
2295         bltzal(scratch, offset);
2296         break;
2297       case less:
2298         slt(scratch, rs, r2);
2299         addiu(scratch, scratch, -1);
2300         bgezal(scratch, offset);
2301         break;
2302       case less_equal:
2303         slt(scratch, r2, rs);
2304         addiu(scratch, scratch, -1);
2305         bltzal(scratch, offset);
2306         break;
2307 
2308       // Unsigned comparison.
2309       case Ugreater:
2310         sltu(scratch, r2, rs);
2311         addiu(scratch, scratch, -1);
2312         bgezal(scratch, offset);
2313         break;
2314       case Ugreater_equal:
2315         sltu(scratch, rs, r2);
2316         addiu(scratch, scratch, -1);
2317         bltzal(scratch, offset);
2318         break;
2319       case Uless:
2320         sltu(scratch, rs, r2);
2321         addiu(scratch, scratch, -1);
2322         bgezal(scratch, offset);
2323         break;
2324       case Uless_equal:
2325         sltu(scratch, r2, rs);
2326         addiu(scratch, scratch, -1);
2327         bltzal(scratch, offset);
2328         break;
2329 
2330       default:
2331         UNREACHABLE();
2332     }
2333   }
2334   // Emit a nop in the branch delay slot if required.
2335   if (bdslot == PROTECT)
2336     nop();
2337 }
2338 
2339 
2340 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2341   bal(shifted_branch_offset(L, false));
2342 
2343   // Emit a nop in the branch delay slot if required.
2344   if (bdslot == PROTECT)
2345     nop();
2346 }
2347 
2348 
2349 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2350                                         const Operand& rt,
2351                                         BranchDelaySlot bdslot) {
2352   BRANCH_ARGS_CHECK(cond, rs, rt);
2353 
2354   int32_t offset = 0;
2355   Register r2 = no_reg;
2356   Register scratch = at;
2357   if (rt.is_reg()) {
2358     r2 = rt.rm_;
2359   } else if (cond != cc_always) {
2360     r2 = scratch;
2361     li(r2, rt);
2362   }
2363 
2364   {
2365     BlockTrampolinePoolScope block_trampoline_pool(this);
2366     switch (cond) {
2367       case cc_always:
2368         offset = shifted_branch_offset(L, false);
2369         bal(offset);
2370         break;
2371       case eq:
2372         bne(rs, r2, 2);
2373         nop();
2374         offset = shifted_branch_offset(L, false);
2375         bal(offset);
2376         break;
2377       case ne:
2378         beq(rs, r2, 2);
2379         nop();
2380         offset = shifted_branch_offset(L, false);
2381         bal(offset);
2382         break;
2383 
2384       // Signed comparison.
2385       case greater:
2386         slt(scratch, r2, rs);
2387         addiu(scratch, scratch, -1);
2388         offset = shifted_branch_offset(L, false);
2389         bgezal(scratch, offset);
2390         break;
2391       case greater_equal:
2392         slt(scratch, rs, r2);
2393         addiu(scratch, scratch, -1);
2394         offset = shifted_branch_offset(L, false);
2395         bltzal(scratch, offset);
2396         break;
2397       case less:
2398         slt(scratch, rs, r2);
2399         addiu(scratch, scratch, -1);
2400         offset = shifted_branch_offset(L, false);
2401         bgezal(scratch, offset);
2402         break;
2403       case less_equal:
2404         slt(scratch, r2, rs);
2405         addiu(scratch, scratch, -1);
2406         offset = shifted_branch_offset(L, false);
2407         bltzal(scratch, offset);
2408         break;
2409 
2410       // Unsigned comparison.
2411       case Ugreater:
2412         sltu(scratch, r2, rs);
2413         addiu(scratch, scratch, -1);
2414         offset = shifted_branch_offset(L, false);
2415         bgezal(scratch, offset);
2416         break;
2417       case Ugreater_equal:
2418         sltu(scratch, rs, r2);
2419         addiu(scratch, scratch, -1);
2420         offset = shifted_branch_offset(L, false);
2421         bltzal(scratch, offset);
2422         break;
2423       case Uless:
2424         sltu(scratch, rs, r2);
2425         addiu(scratch, scratch, -1);
2426         offset = shifted_branch_offset(L, false);
2427         bgezal(scratch, offset);
2428         break;
2429       case Uless_equal:
2430         sltu(scratch, r2, rs);
2431         addiu(scratch, scratch, -1);
2432         offset = shifted_branch_offset(L, false);
2433         bltzal(scratch, offset);
2434         break;
2435 
2436       default:
2437         UNREACHABLE();
2438     }
2439   }
2440   // Check that the offset actually fits in an int16_t.
2441   ASSERT(is_int16(offset));
2442 
2443   // Emit a nop in the branch delay slot if required.
2444   if (bdslot == PROTECT)
2445     nop();
2446 }
2447 
2448 
2449 void MacroAssembler::Jump(Register target,
2450                           Condition cond,
2451                           Register rs,
2452                           const Operand& rt,
2453                           BranchDelaySlot bd) {
2454   BlockTrampolinePoolScope block_trampoline_pool(this);
2455   if (cond == cc_always) {
2456     jr(target);
2457   } else {
2458     BRANCH_ARGS_CHECK(cond, rs, rt);
2459     Branch(2, NegateCondition(cond), rs, rt);
2460     jr(target);
2461   }
2462   // Emit a nop in the branch delay slot if required.
2463   if (bd == PROTECT)
2464     nop();
2465 }
2466 
2467 
2468 void MacroAssembler::Jump(intptr_t target,
2469                           RelocInfo::Mode rmode,
2470                           Condition cond,
2471                           Register rs,
2472                           const Operand& rt,
2473                           BranchDelaySlot bd) {
2474   Label skip;
2475   if (cond != cc_always) {
2476     Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2477   }
2478   // The first instruction of 'li' may be placed in the delay slot.
2479   // This is not an issue, t9 is expected to be clobbered anyway.
2480   li(t9, Operand(target, rmode));
2481   Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2482   bind(&skip);
2483 }
2484 
2485 
2486 void MacroAssembler::Jump(Address target,
2487                           RelocInfo::Mode rmode,
2488                           Condition cond,
2489                           Register rs,
2490                           const Operand& rt,
2491                           BranchDelaySlot bd) {
2492   ASSERT(!RelocInfo::IsCodeTarget(rmode));
2493   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2494 }
2495 
2496 
2497 void MacroAssembler::Jump(Handle<Code> code,
2498                           RelocInfo::Mode rmode,
2499                           Condition cond,
2500                           Register rs,
2501                           const Operand& rt,
2502                           BranchDelaySlot bd) {
2503   ASSERT(RelocInfo::IsCodeTarget(rmode));
2504   AllowDeferredHandleDereference embedding_raw_address;
2505   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2506 }
2507 
2508 
2509 int MacroAssembler::CallSize(Register target,
2510                              Condition cond,
2511                              Register rs,
2512                              const Operand& rt,
2513                              BranchDelaySlot bd) {
2514   int size = 0;
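  // An unconditional call is a single jalr; a conditional call adds a short
  // branch over the jalr plus the branch's delay-slot nop. PROTECT adds one
  // more nop for the call's own delay slot.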
2515 
2516   if (cond == cc_always) {
2517     size += 1;
2518   } else {
2519     size += 3;
2520   }
2521 
2522   if (bd == PROTECT)
2523     size += 1;
2524 
2525   return size * kInstrSize;
2526 }
2527 
2528 
2529 // Note: To call gcc-compiled C code on mips, you must call through t9.
2530 void MacroAssembler::Call(Register target,
2531                           Condition cond,
2532                           Register rs,
2533                           const Operand& rt,
2534                           BranchDelaySlot bd) {
2535   BlockTrampolinePoolScope block_trampoline_pool(this);
2536   Label start;
2537   bind(&start);
2538   if (cond == cc_always) {
2539     jalr(target);
2540   } else {
2541     BRANCH_ARGS_CHECK(cond, rs, rt);
2542     Branch(2, NegateCondition(cond), rs, rt);
2543     jalr(target);
2544   }
2545   // Emit a nop in the branch delay slot if required.
2546   if (bd == PROTECT)
2547     nop();
2548 
2549   ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2550             SizeOfCodeGeneratedSince(&start));
2551 }
2552 
2553 
2554 int MacroAssembler::CallSize(Address target,
2555                              RelocInfo::Mode rmode,
2556                              Condition cond,
2557                              Register rs,
2558                              const Operand& rt,
2559                              BranchDelaySlot bd) {
2560   int size = CallSize(t9, cond, rs, rt, bd);
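  // Add the two instructions (lui/ori) that li with CONSTANT_SIZE emits to
  // materialize the call target in t9.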
2561   return size + 2 * kInstrSize;
2562 }
2563 
2564 
2565 void MacroAssembler::Call(Address target,
2566                           RelocInfo::Mode rmode,
2567                           Condition cond,
2568                           Register rs,
2569                           const Operand& rt,
2570                           BranchDelaySlot bd) {
2571   BlockTrampolinePoolScope block_trampoline_pool(this);
2572   Label start;
2573   bind(&start);
2574   int32_t target_int = reinterpret_cast<int32_t>(target);
2575   // Must record previous source positions before the
2576   // li() generates a new code target.
2577   positions_recorder()->WriteRecordedPositions();
2578   li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2579   Call(t9, cond, rs, rt, bd);
2580   ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2581             SizeOfCodeGeneratedSince(&start));
2582 }
2583 
2584 
2585 int MacroAssembler::CallSize(Handle<Code> code,
2586                              RelocInfo::Mode rmode,
2587                              TypeFeedbackId ast_id,
2588                              Condition cond,
2589                              Register rs,
2590                              const Operand& rt,
2591                              BranchDelaySlot bd) {
2592   AllowDeferredHandleDereference using_raw_address;
2593   return CallSize(reinterpret_cast<Address>(code.location()),
2594       rmode, cond, rs, rt, bd);
2595 }
2596 
2597 
2598 void MacroAssembler::Call(Handle<Code> code,
2599                           RelocInfo::Mode rmode,
2600                           TypeFeedbackId ast_id,
2601                           Condition cond,
2602                           Register rs,
2603                           const Operand& rt,
2604                           BranchDelaySlot bd) {
2605   BlockTrampolinePoolScope block_trampoline_pool(this);
2606   Label start;
2607   bind(&start);
2608   ASSERT(RelocInfo::IsCodeTarget(rmode));
2609   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2610     SetRecordedAstId(ast_id);
2611     rmode = RelocInfo::CODE_TARGET_WITH_ID;
2612   }
2613   AllowDeferredHandleDereference embedding_raw_address;
2614   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2615   ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2616             SizeOfCodeGeneratedSince(&start));
2617 }
2618 
2619 
2620 void MacroAssembler::Ret(Condition cond,
2621                          Register rs,
2622                          const Operand& rt,
2623                          BranchDelaySlot bd) {
2624   Jump(ra, cond, rs, rt, bd);
2625 }
2626 
2627 
2628 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2629   BlockTrampolinePoolScope block_trampoline_pool(this);
2630 
2631   uint32_t imm28;
2632   imm28 = jump_address(L);
2633   imm28 &= kImm28Mask;
2634   { BlockGrowBufferScope block_buf_growth(this);
2635     // Buffer growth (and relocation) must be blocked for internal references
2636     // until associated instructions are emitted and available to be patched.
2637     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2638     j(imm28);
2639   }
2640   // Emit a nop in the branch delay slot if required.
2641   if (bdslot == PROTECT)
2642     nop();
2643 }
2644 
2645 
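// Jr materializes the full 32-bit target address in 'at' with a lui/ori pair
// and jumps through the register, so it reaches targets that the 28-bit
// region-relative 'j' used by J above cannot.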
2646 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2647   BlockTrampolinePoolScope block_trampoline_pool(this);
2648 
2649   uint32_t imm32;
2650   imm32 = jump_address(L);
2651   { BlockGrowBufferScope block_buf_growth(this);
2652     // Buffer growth (and relocation) must be blocked for internal references
2653     // until associated instructions are emitted and available to be patched.
2654     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2655     lui(at, (imm32 & kHiMask) >> kLuiShift);
2656     ori(at, at, (imm32 & kImm16Mask));
2657   }
2658   jr(at);
2659 
2660   // Emit a nop in the branch delay slot if required.
2661   if (bdslot == PROTECT)
2662     nop();
2663 }
2664 
2665 
2666 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2667   BlockTrampolinePoolScope block_trampoline_pool(this);
2668 
2669   uint32_t imm32;
2670   imm32 = jump_address(L);
2671   { BlockGrowBufferScope block_buf_growth(this);
2672     // Buffer growth (and relocation) must be blocked for internal references
2673     // until associated instructions are emitted and available to be patched.
2674     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2675     lui(at, (imm32 & kHiMask) >> kLuiShift);
2676     ori(at, at, (imm32 & kImm16Mask));
2677   }
2678   jalr(at);
2679 
2680   // Emit a nop in the branch delay slot if required.
2681   if (bdslot == PROTECT)
2682     nop();
2683 }
2684 
2685 
2686 void MacroAssembler::DropAndRet(int drop) {
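  // The stack adjustment executes in the delay slot of the return jump,
  // saving one instruction.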
2687   Ret(USE_DELAY_SLOT);
2688   addiu(sp, sp, drop * kPointerSize);
2689 }
2690 
2691 void MacroAssembler::DropAndRet(int drop,
2692                                 Condition cond,
2693                                 Register r1,
2694                                 const Operand& r2) {
2695   // Both Drop and Ret need to be conditional.
2696   Label skip;
2697   if (cond != cc_always) {
2698     Branch(&skip, NegateCondition(cond), r1, r2);
2699   }
2700 
2701   Drop(drop);
2702   Ret();
2703 
2704   if (cond != cc_always) {
2705     bind(&skip);
2706   }
2707 }
2708 
2709 
2710 void MacroAssembler::Drop(int count,
2711                           Condition cond,
2712                           Register reg,
2713                           const Operand& op) {
2714   if (count <= 0) {
2715     return;
2716   }
2717 
2718   Label skip;
2719 
2720   if (cond != al) {
2721     Branch(&skip, NegateCondition(cond), reg, op);
2722   }
2723 
2724   addiu(sp, sp, count * kPointerSize);
2725 
2726   if (cond != al) {
2727     bind(&skip);
2728   }
2729 }
2730 
2731 
2732 
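// Swap falls back to the three-XOR trick when no scratch register is
// available; this assumes reg1 and reg2 are distinct, since aliasing
// registers would be zeroed.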
2733 void MacroAssembler::Swap(Register reg1,
2734                           Register reg2,
2735                           Register scratch) {
2736   if (scratch.is(no_reg)) {
2737     Xor(reg1, reg1, Operand(reg2));
2738     Xor(reg2, reg2, Operand(reg1));
2739     Xor(reg1, reg1, Operand(reg2));
2740   } else {
2741     mov(scratch, reg1);
2742     mov(reg1, reg2);
2743     mov(reg2, scratch);
2744   }
2745 }
2746 
2747 
2748 void MacroAssembler::Call(Label* target) {
2749   BranchAndLink(target);
2750 }
2751 
2752 
2753 void MacroAssembler::Push(Handle<Object> handle) {
2754   li(at, Operand(handle));
2755   push(at);
2756 }
2757 
2758 
2759 void MacroAssembler::DebugBreak() {
2760   PrepareCEntryArgs(0);
2761   PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2762   CEntryStub ces(isolate(), 1);
2763   ASSERT(AllowThisStubCall(&ces));
2764   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2765 }
2766 
2767 
2768 // ---------------------------------------------------------------------------
2769 // Exception handling.
2770 
2771 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2772                                     int handler_index) {
2773   // Adjust this code if not the case.
2774   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2775   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2776   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2777   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2778   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2779   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2780 
2781   // For the JSEntry handler, we must preserve a0-a3 and s0.
2782   // t1-t3 are available. We will build up the handler from the bottom by
2783   // pushing on the stack.
2784   // Set up the code object (t1) and the state (t2) for pushing.
2785   unsigned state =
2786       StackHandler::IndexField::encode(handler_index) |
2787       StackHandler::KindField::encode(kind);
2788   li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2789   li(t2, Operand(state));
2790 
2791   // Push the frame pointer, context, state, and code object.
2792   if (kind == StackHandler::JS_ENTRY) {
2793     ASSERT_EQ(Smi::FromInt(0), 0);
2794     // The second zero_reg indicates no context.
2795     // The first zero_reg is the NULL frame pointer.
2796     // The operands are reversed to match the order of MultiPush/Pop.
2797     Push(zero_reg, zero_reg, t2, t1);
2798   } else {
2799     MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2800   }
2801 
2802   // Link the current handler as the next handler.
2803   li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2804   lw(t1, MemOperand(t2));
2805   push(t1);
2806   // Set this new handler as the current one.
2807   sw(sp, MemOperand(t2));
2808 }
2809 
2810 
2811 void MacroAssembler::PopTryHandler() {
2812   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2813   pop(a1);
2814   Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2815   li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2816   sw(a1, MemOperand(at));
2817 }
2818 
2819 
2820 void MacroAssembler::JumpToHandlerEntry() {
2821   // Compute the handler entry address and jump to it.  The handler table is
2822   // a fixed array of (smi-tagged) code offsets.
2823   // v0 = exception, a1 = code object, a2 = state.
2824   lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
2825   Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2826   srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
2827   sll(a2, a2, kPointerSizeLog2);
2828   Addu(a2, a3, a2);
2829   lw(a2, MemOperand(a2));  // Smi-tagged offset.
2830   Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
2831   sra(t9, a2, kSmiTagSize);
2832   Addu(t9, t9, a1);
2833   Jump(t9);  // Jump.
2834 }
2835 
2836 
2837 void MacroAssembler::Throw(Register value) {
2838   // Adjust this code if not the case.
2839   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2840   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2841   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2842   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2843   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2844   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2845 
2846   // The exception is expected in v0.
2847   Move(v0, value);
2848 
2849   // Drop the stack pointer to the top of the top handler.
2850   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2851                                    isolate())));
2852   lw(sp, MemOperand(a3));
2853 
2854   // Restore the next handler.
2855   pop(a2);
2856   sw(a2, MemOperand(a3));
2857 
2858   // Get the code object (a1) and state (a2).  Restore the context and frame
2859   // pointer.
2860   MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2861 
2862   // If the handler is a JS frame, restore the context to the frame.
2863   // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2864   // or cp.
2865   Label done;
2866   Branch(&done, eq, cp, Operand(zero_reg));
2867   sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2868   bind(&done);
2869 
2870   JumpToHandlerEntry();
2871 }
2872 
2873 
2874 void MacroAssembler::ThrowUncatchable(Register value) {
2875   // Adjust this code if not the case.
2876   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2877   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2878   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2879   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2880   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2881   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2882 
2883   // The exception is expected in v0.
2884   if (!value.is(v0)) {
2885     mov(v0, value);
2886   }
2887   // Drop the stack pointer to the top of the top stack handler.
2888   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2889   lw(sp, MemOperand(a3));
2890 
2891   // Unwind the handlers until the ENTRY handler is found.
2892   Label fetch_next, check_kind;
2893   jmp(&check_kind);
2894   bind(&fetch_next);
2895   lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2896 
2897   bind(&check_kind);
2898   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2899   lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2900   And(a2, a2, Operand(StackHandler::KindField::kMask));
2901   Branch(&fetch_next, ne, a2, Operand(zero_reg));
2902 
2903   // Set the top handler address to next handler past the top ENTRY handler.
2904   pop(a2);
2905   sw(a2, MemOperand(a3));
2906 
2907   // Get the code object (a1) and state (a2).  Clear the context and frame
2908   // pointer (0 was saved in the handler).
2909   MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2910 
2911   JumpToHandlerEntry();
2912 }
2913 
2914 
2915 void MacroAssembler::Allocate(int object_size,
2916                               Register result,
2917                               Register scratch1,
2918                               Register scratch2,
2919                               Label* gc_required,
2920                               AllocationFlags flags) {
2921   ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2922   if (!FLAG_inline_new) {
2923     if (emit_debug_code()) {
2924       // Trash the registers to simulate an allocation failure.
2925       li(result, 0x7091);
2926       li(scratch1, 0x7191);
2927       li(scratch2, 0x7291);
2928     }
2929     jmp(gc_required);
2930     return;
2931   }
2932 
2933   ASSERT(!result.is(scratch1));
2934   ASSERT(!result.is(scratch2));
2935   ASSERT(!scratch1.is(scratch2));
2936   ASSERT(!scratch1.is(t9));
2937   ASSERT(!scratch2.is(t9));
2938   ASSERT(!result.is(t9));
2939 
2940   // Make object size into bytes.
2941   if ((flags & SIZE_IN_WORDS) != 0) {
2942     object_size *= kPointerSize;
2943   }
2944   ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2945 
2946   // Check relative positions of allocation top and limit addresses.
2947   // ARM adds additional checks to make sure the ldm instruction can be
2948   // used. On MIPS we don't have ldm so we don't need additional checks either.
2949   ExternalReference allocation_top =
2950       AllocationUtils::GetAllocationTopReference(isolate(), flags);
2951   ExternalReference allocation_limit =
2952       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2953 
2954   intptr_t top   =
2955       reinterpret_cast<intptr_t>(allocation_top.address());
2956   intptr_t limit =
2957       reinterpret_cast<intptr_t>(allocation_limit.address());
2958   ASSERT((limit - top) == kPointerSize);
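  // Adjacency lets the code below load the limit relative to topaddr, at
  // MemOperand(topaddr, kPointerSize).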
2959 
2960   // Set up allocation top address and object size registers.
2961   Register topaddr = scratch1;
2962   li(topaddr, Operand(allocation_top));
2963 
2964   // This code stores a temporary value in t9.
2965   if ((flags & RESULT_CONTAINS_TOP) == 0) {
2966     // Load allocation top into result and allocation limit into t9.
2967     lw(result, MemOperand(topaddr));
2968     lw(t9, MemOperand(topaddr, kPointerSize));
2969   } else {
2970     if (emit_debug_code()) {
2971       // Assert that result actually contains top on entry. t9 is used
2972       // immediately below, so this use of t9 does not cause a difference in
2973       // register content between debug and release mode.
2974       lw(t9, MemOperand(topaddr));
2975       Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2976     }
2977     // Load allocation limit into t9. Result already contains allocation top.
2978     lw(t9, MemOperand(topaddr, limit - top));
2979   }
2980 
2981   if ((flags & DOUBLE_ALIGNMENT) != 0) {
2982     // Align the next allocation. Storing the filler map without checking top is
2983     // safe in new-space because the limit of the heap is aligned there.
2984     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2985     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2986     And(scratch2, result, Operand(kDoubleAlignmentMask));
2987     Label aligned;
2988     Branch(&aligned, eq, scratch2, Operand(zero_reg));
2989     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2990       Branch(gc_required, Ugreater_equal, result, Operand(t9));
2991     }
2992     li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2993     sw(scratch2, MemOperand(result));
2994     Addu(result, result, Operand(kDoubleSize / 2));
2995     bind(&aligned);
2996   }
2997 
2998   // Calculate new top and bail out if new space is exhausted. Use result
2999   // to calculate the new top.
3000   Addu(scratch2, result, Operand(object_size));
3001   Branch(gc_required, Ugreater, scratch2, Operand(t9));
3002   sw(scratch2, MemOperand(topaddr));
3003 
3004   // Tag object if requested.
3005   if ((flags & TAG_OBJECT) != 0) {
3006     Addu(result, result, Operand(kHeapObjectTag));
3007   }
3008 }
3009 
3010 
3011 void MacroAssembler::Allocate(Register object_size,
3012                               Register result,
3013                               Register scratch1,
3014                               Register scratch2,
3015                               Label* gc_required,
3016                               AllocationFlags flags) {
3017   if (!FLAG_inline_new) {
3018     if (emit_debug_code()) {
3019       // Trash the registers to simulate an allocation failure.
3020       li(result, 0x7091);
3021       li(scratch1, 0x7191);
3022       li(scratch2, 0x7291);
3023     }
3024     jmp(gc_required);
3025     return;
3026   }
3027 
3028   ASSERT(!result.is(scratch1));
3029   ASSERT(!result.is(scratch2));
3030   ASSERT(!scratch1.is(scratch2));
3031   ASSERT(!object_size.is(t9));
3032   ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3033 
3034   // Check relative positions of allocation top and limit addresses.
3035   // ARM adds additional checks to make sure the ldm instruction can be
3036   // used. On MIPS we don't have ldm so we don't need additional checks either.
3037   ExternalReference allocation_top =
3038       AllocationUtils::GetAllocationTopReference(isolate(), flags);
3039   ExternalReference allocation_limit =
3040       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3041   intptr_t top   =
3042       reinterpret_cast<intptr_t>(allocation_top.address());
3043   intptr_t limit =
3044       reinterpret_cast<intptr_t>(allocation_limit.address());
3045   ASSERT((limit - top) == kPointerSize);
3046 
3047   // Set up allocation top address and object size registers.
3048   Register topaddr = scratch1;
3049   li(topaddr, Operand(allocation_top));
3050 
3051   // This code stores a temporary value in t9.
3052   if ((flags & RESULT_CONTAINS_TOP) == 0) {
3053     // Load allocation top into result and allocation limit into t9.
3054     lw(result, MemOperand(topaddr));
3055     lw(t9, MemOperand(topaddr, kPointerSize));
3056   } else {
3057     if (emit_debug_code()) {
3058       // Assert that result actually contains top on entry. t9 is used
3059       // immediately below, so this use of t9 does not cause a difference in
3060       // register content between debug and release mode.
3061       lw(t9, MemOperand(topaddr));
3062       Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3063     }
3064     // Load allocation limit into t9. Result already contains allocation top.
3065     lw(t9, MemOperand(topaddr, limit - top));
3066   }
3067 
3068   if ((flags & DOUBLE_ALIGNMENT) != 0) {
3069     // Align the next allocation. Storing the filler map without checking top is
3070     // safe in new-space because the limit of the heap is aligned there.
3071     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3072     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
3073     And(scratch2, result, Operand(kDoubleAlignmentMask));
3074     Label aligned;
3075     Branch(&aligned, eq, scratch2, Operand(zero_reg));
3076     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3077       Branch(gc_required, Ugreater_equal, result, Operand(t9));
3078     }
3079     li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3080     sw(scratch2, MemOperand(result));
3081     Addu(result, result, Operand(kDoubleSize / 2));
3082     bind(&aligned);
3083   }
3084 
3085   // Calculate new top and bail out if new space is exhausted. Use result
3086   // to calculate the new top. Object size may be in words so a shift is
3087   // required to get the number of bytes.
3088   if ((flags & SIZE_IN_WORDS) != 0) {
3089     sll(scratch2, object_size, kPointerSizeLog2);
3090     Addu(scratch2, result, scratch2);
3091   } else {
3092     Addu(scratch2, result, Operand(object_size));
3093   }
3094   Branch(gc_required, Ugreater, scratch2, Operand(t9));
3095 
3096   // Update allocation top. result temporarily holds the new top.
3097   if (emit_debug_code()) {
3098     And(t9, scratch2, Operand(kObjectAlignmentMask));
3099     Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3100   }
3101   sw(scratch2, MemOperand(topaddr));
3102 
3103   // Tag object if requested.
3104   if ((flags & TAG_OBJECT) != 0) {
3105     Addu(result, result, Operand(kHeapObjectTag));
3106   }
3107 }
3108 
3109 
3110 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3111                                               Register scratch) {
3112   ExternalReference new_space_allocation_top =
3113       ExternalReference::new_space_allocation_top_address(isolate());
3114 
3115   // Make sure the object has no tag before resetting top.
3116   And(object, object, Operand(~kHeapObjectTagMask));
3117 #ifdef DEBUG
3118   // Check that the object un-allocated is below the current top.
3119   li(scratch, Operand(new_space_allocation_top));
3120   lw(scratch, MemOperand(scratch));
3121   Check(less, kUndoAllocationOfNonAllocatedMemory,
3122       object, Operand(scratch));
3123 #endif
3124   // Write the address of the object to un-allocate as the current top.
3125   li(scratch, Operand(new_space_allocation_top));
3126   sw(object, MemOperand(scratch));
3127 }
3128 
3129 
3130 void MacroAssembler::AllocateTwoByteString(Register result,
3131                                            Register length,
3132                                            Register scratch1,
3133                                            Register scratch2,
3134                                            Register scratch3,
3135                                            Label* gc_required) {
3136   // Calculate the number of bytes needed for the characters in the string while
3137   // observing object alignment.
3138   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3139   sll(scratch1, length, 1);  // Length in bytes, not chars.
3140   addiu(scratch1, scratch1,
3141        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3142   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3143 
3144   // Allocate two-byte string in new space.
3145   Allocate(scratch1,
3146            result,
3147            scratch2,
3148            scratch3,
3149            gc_required,
3150            TAG_OBJECT);
3151 
3152   // Set the map, length and hash field.
3153   InitializeNewString(result,
3154                       length,
3155                       Heap::kStringMapRootIndex,
3156                       scratch1,
3157                       scratch2);
3158 }
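
// Size computation sketch for the allocation above (annotation, not part of
// the original source). With kObjectAlignment == 8, a two-byte string of
// length 3 needs 2 * 3 = 6 payload bytes, so scratch1 becomes
//
//   (6 + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//       & ~kObjectAlignmentMask
//
// i.e. payload plus header, rounded up to the next 8-byte boundary.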
3159 
3160 
3161 void MacroAssembler::AllocateAsciiString(Register result,
3162                                          Register length,
3163                                          Register scratch1,
3164                                          Register scratch2,
3165                                          Register scratch3,
3166                                          Label* gc_required) {
3167   // Calculate the number of bytes needed for the characters in the string
3168   // while observing object alignment.
3169   ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3170   ASSERT(kCharSize == 1);
3171   addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3172   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3173 
3174   // Allocate ASCII string in new space.
3175   Allocate(scratch1,
3176            result,
3177            scratch2,
3178            scratch3,
3179            gc_required,
3180            TAG_OBJECT);
3181 
3182   // Set the map, length and hash field.
3183   InitializeNewString(result,
3184                       length,
3185                       Heap::kAsciiStringMapRootIndex,
3186                       scratch1,
3187                       scratch2);
3188 }
3189 
3190 
3191 void MacroAssembler::AllocateTwoByteConsString(Register result,
3192                                                Register length,
3193                                                Register scratch1,
3194                                                Register scratch2,
3195                                                Label* gc_required) {
3196   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3197            TAG_OBJECT);
3198   InitializeNewString(result,
3199                       length,
3200                       Heap::kConsStringMapRootIndex,
3201                       scratch1,
3202                       scratch2);
3203 }
3204 
3205 
3206 void MacroAssembler::AllocateAsciiConsString(Register result,
3207                                              Register length,
3208                                              Register scratch1,
3209                                              Register scratch2,
3210                                              Label* gc_required) {
3211   Allocate(ConsString::kSize,
3212            result,
3213            scratch1,
3214            scratch2,
3215            gc_required,
3216            TAG_OBJECT);
3217 
3218   InitializeNewString(result,
3219                       length,
3220                       Heap::kConsAsciiStringMapRootIndex,
3221                       scratch1,
3222                       scratch2);
3223 }
3224 
3225 
3226 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3227                                                  Register length,
3228                                                  Register scratch1,
3229                                                  Register scratch2,
3230                                                  Label* gc_required) {
3231   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3232            TAG_OBJECT);
3233 
3234   InitializeNewString(result,
3235                       length,
3236                       Heap::kSlicedStringMapRootIndex,
3237                       scratch1,
3238                       scratch2);
3239 }
3240 
3241 
3242 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3243                                                Register length,
3244                                                Register scratch1,
3245                                                Register scratch2,
3246                                                Label* gc_required) {
3247   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3248            TAG_OBJECT);
3249 
3250   InitializeNewString(result,
3251                       length,
3252                       Heap::kSlicedAsciiStringMapRootIndex,
3253                       scratch1,
3254                       scratch2);
3255 }
3256 
3257 
3258 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3259                                          Label* not_unique_name) {
3260   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3261   Label succeed;
3262   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3263   Branch(&succeed, eq, at, Operand(zero_reg));
3264   Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3265 
3266   bind(&succeed);
3267 }
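
// Classification performed above (annotation, not part of the original
// source), given an instance type in `reg`:
//
//   (reg & (kIsNotStringMask | kIsNotInternalizedMask)) == 0
//                           -> internalized string: unique, fall through.
//   reg == SYMBOL_TYPE      -> symbol: unique, fall through.
//   anything else           -> branch to not_unique_name.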
3268 
3269 
3270 // Allocates a heap number or jumps to the label if the young space is full and
3271 // a scavenge is needed.
3272 void MacroAssembler::AllocateHeapNumber(Register result,
3273                                         Register scratch1,
3274                                         Register scratch2,
3275                                         Register heap_number_map,
3276                                         Label* need_gc,
3277                                         TaggingMode tagging_mode) {
3278   // Allocate an object in the heap for the heap number and tag it as a heap
3279   // object.
3280   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3281            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3282 
3283   // Store heap number map in the allocated object.
3284   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3285   if (tagging_mode == TAG_RESULT) {
3286     sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3287   } else {
3288     sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3289   }
3290 }
3291 
3292 
3293 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3294                                                  FPURegister value,
3295                                                  Register scratch1,
3296                                                  Register scratch2,
3297                                                  Label* gc_required) {
3298   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3299   AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3300   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3301 }
3302 
3303 
3304 // Copies a fixed number of fields of heap objects from src to dst.
3305 void MacroAssembler::CopyFields(Register dst,
3306                                 Register src,
3307                                 RegList temps,
3308                                 int field_count) {
3309   ASSERT((temps & dst.bit()) == 0);
3310   ASSERT((temps & src.bit()) == 0);
3311   // Primitive implementation using only one temporary register.
3312 
3313   Register tmp = no_reg;
3314   // Find a temp register in temps list.
3315   for (int i = 0; i < kNumRegisters; i++) {
3316     if ((temps & (1 << i)) != 0) {
3317       tmp.code_ = i;
3318       break;
3319     }
3320   }
3321   ASSERT(!tmp.is(no_reg));
3322 
3323   for (int i = 0; i < field_count; i++) {
3324     lw(tmp, FieldMemOperand(src, i * kPointerSize));
3325     sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3326   }
3327 }
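
// Usage sketch (annotation, not part of the original source; register
// choices are illustrative). Copying the first three pointer-sized fields
// of an object, offering t5 as the single temporary:
//
//   CopyFields(a2, t1, t5.bit(), 3);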
3328 
3329 
3330 void MacroAssembler::CopyBytes(Register src,
3331                                Register dst,
3332                                Register length,
3333                                Register scratch) {
3334   Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3335 
3336   // Align src before copying in word size chunks.
3337   Branch(&byte_loop, le, length, Operand(kPointerSize));
3338   bind(&align_loop_1);
3339   And(scratch, src, kPointerSize - 1);
3340   Branch(&word_loop, eq, scratch, Operand(zero_reg));
3341   lbu(scratch, MemOperand(src));
3342   Addu(src, src, 1);
3343   sb(scratch, MemOperand(dst));
3344   Addu(dst, dst, 1);
3345   Subu(length, length, Operand(1));
3346   Branch(&align_loop_1, ne, length, Operand(zero_reg));
3347 
3348   // Copy bytes in word size chunks.
3349   bind(&word_loop);
3350   if (emit_debug_code()) {
3351     And(scratch, src, kPointerSize - 1);
3352     Assert(eq, kExpectingAlignmentForCopyBytes,
3353         scratch, Operand(zero_reg));
3354   }
3355   Branch(&byte_loop, lt, length, Operand(kPointerSize));
3356   lw(scratch, MemOperand(src));
3357   Addu(src, src, kPointerSize);
3358 
3359   // TODO(kalmard) check if this can be optimized to use sw in most cases.
3360   // Can't use unaligned access - copy byte by byte.
3361   if (kArchEndian == kLittle) {
3362     sb(scratch, MemOperand(dst, 0));
3363     srl(scratch, scratch, 8);
3364     sb(scratch, MemOperand(dst, 1));
3365     srl(scratch, scratch, 8);
3366     sb(scratch, MemOperand(dst, 2));
3367     srl(scratch, scratch, 8);
3368     sb(scratch, MemOperand(dst, 3));
3369   } else {
3370     sb(scratch, MemOperand(dst, 3));
3371     srl(scratch, scratch, 8);
3372     sb(scratch, MemOperand(dst, 2));
3373     srl(scratch, scratch, 8);
3374     sb(scratch, MemOperand(dst, 1));
3375     srl(scratch, scratch, 8);
3376     sb(scratch, MemOperand(dst, 0));
3377   }
3378 
3379   Addu(dst, dst, 4);
3380 
3381   Subu(length, length, Operand(kPointerSize));
3382   Branch(&word_loop);
3383 
3384   // Copy the last bytes if any left.
3385   bind(&byte_loop);
3386   Branch(&done, eq, length, Operand(zero_reg));
3387   bind(&byte_loop_1);
3388   lbu(scratch, MemOperand(src));
3389   Addu(src, src, 1);
3390   sb(scratch, MemOperand(dst));
3391   Addu(dst, dst, 1);
3392   Subu(length, length, Operand(1));
3393   Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3394   bind(&done);
3395 }
3396 
3397 
3398 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3399                                                 Register end_offset,
3400                                                 Register filler) {
3401   Label loop, entry;
3402   Branch(&entry);
3403   bind(&loop);
3404   sw(filler, MemOperand(start_offset));
3405   Addu(start_offset, start_offset, kPointerSize);
3406   bind(&entry);
3407   Branch(&loop, lt, start_offset, Operand(end_offset));
3408 }
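
// Usage sketch (annotation, not part of the original source; register
// choices are illustrative). Filling the fields of a freshly allocated
// object with undefined, where t1 and t2 hold the first and one-past-last
// field addresses:
//
//   LoadRoot(t7, Heap::kUndefinedValueRootIndex);
//   InitializeFieldsWithFiller(t1, t2, t7);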
3409 
3410 
3411 void MacroAssembler::CheckFastElements(Register map,
3412                                        Register scratch,
3413                                        Label* fail) {
3414   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3415   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3416   STATIC_ASSERT(FAST_ELEMENTS == 2);
3417   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3418   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3419   Branch(fail, hi, scratch,
3420          Operand(Map::kMaximumBitField2FastHoleyElementValue));
3421 }
3422 
3423 
3424 void MacroAssembler::CheckFastObjectElements(Register map,
3425                                              Register scratch,
3426                                              Label* fail) {
3427   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3428   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3429   STATIC_ASSERT(FAST_ELEMENTS == 2);
3430   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3431   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3432   Branch(fail, ls, scratch,
3433          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3434   Branch(fail, hi, scratch,
3435          Operand(Map::kMaximumBitField2FastHoleyElementValue));
3436 }
3437 
3438 
3439 void MacroAssembler::CheckFastSmiElements(Register map,
3440                                           Register scratch,
3441                                           Label* fail) {
3442   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3443   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3444   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3445   Branch(fail, hi, scratch,
3446          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3447 }
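
// How the three checks above carve up the fast element kinds 0..3
// (annotation, not part of the original source):
//
//   CheckFastElements:       passes kinds 0..3, fails above 3.
//   CheckFastObjectElements: passes kinds 2..3, fails 0..1 and above 3.
//   CheckFastSmiElements:    passes kinds 0..1, fails above 1.
//
// One unsigned comparison per bound suffices because the STATIC_ASSERTs pin
// the kinds to this ordering.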
3448 
3449 
3450 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3451                                                  Register key_reg,
3452                                                  Register elements_reg,
3453                                                  Register scratch1,
3454                                                  Register scratch2,
3455                                                  Register scratch3,
3456                                                  Label* fail,
3457                                                  int elements_offset) {
3458   Label smi_value, maybe_nan, have_double_value, is_nan, done;
3459   Register mantissa_reg = scratch2;
3460   Register exponent_reg = scratch3;
3461 
3462   // Handle smi values specially.
3463   JumpIfSmi(value_reg, &smi_value);
3464 
3465   // Ensure that the object is a heap number.
3466   CheckMap(value_reg,
3467            scratch1,
3468            Heap::kHeapNumberMapRootIndex,
3469            fail,
3470            DONT_DO_SMI_CHECK);
3471 
3472   // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
3473   // in the exponent.
3474   li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3475   lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3476   Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3477 
3478   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3479 
3480   bind(&have_double_value);
3481   sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3482   Addu(scratch1, scratch1, elements_reg);
3483   sw(mantissa_reg,
3484       FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3485           + kHoleNanLower32Offset));
3486   sw(exponent_reg,
3487       FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3488           + kHoleNanUpper32Offset));
3489   jmp(&done);
3490 
3491   bind(&maybe_nan);
3492   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3493   // it's an Infinity, and the non-NaN code path applies.
3494   Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3495   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3496   Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3497   bind(&is_nan);
3498   // Load canonical NaN for storing into the double array.
3499   LoadRoot(at, Heap::kNanValueRootIndex);
3500   lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3501   lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3502   jmp(&have_double_value);
3503 
3504   bind(&smi_value);
3505   Addu(scratch1, elements_reg,
3506       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3507               elements_offset));
3508   sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3509   Addu(scratch1, scratch1, scratch2);
3510   // scratch1 is now effective address of the double element
3511 
3512   Register untagged_value = elements_reg;
3513   SmiUntag(untagged_value, value_reg);
3514   mtc1(untagged_value, f2);
3515   cvt_d_w(f0, f2);
3516   sdc1(f0, MemOperand(scratch1, 0));
3517   bind(&done);
3518 }
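
// NaN canonicalization above, by example (annotation, not part of the
// original source):
//
//   +Infinity: upper word 0x7ff00000 and mantissa word 0 -> stored as-is.
//   a NaN:     upper word > 0x7ff00000, or equal with a non-zero mantissa
//              word -> replaced by the canonical NaN from the root list, so
//              the bit pattern reserved for the hole never reaches the
//              FixedDoubleArray.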
3519 
3520 
3521 void MacroAssembler::CompareMapAndBranch(Register obj,
3522                                          Register scratch,
3523                                          Handle<Map> map,
3524                                          Label* early_success,
3525                                          Condition cond,
3526                                          Label* branch_to) {
3527   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3528   CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3529 }
3530 
3531 
3532 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3533                                          Handle<Map> map,
3534                                          Label* early_success,
3535                                          Condition cond,
3536                                          Label* branch_to) {
3537   Branch(branch_to, cond, obj_map, Operand(map));
3538 }
3539 
3540 
3541 void MacroAssembler::CheckMap(Register obj,
3542                               Register scratch,
3543                               Handle<Map> map,
3544                               Label* fail,
3545                               SmiCheckType smi_check_type) {
3546   if (smi_check_type == DO_SMI_CHECK) {
3547     JumpIfSmi(obj, fail);
3548   }
3549   Label success;
3550   CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3551   bind(&success);
3552 }
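
// Usage sketch (annotation, not part of the original source; register
// choices are illustrative). Bailing out unless a0 holds a heap number:
//
//   Label not_heap_number;
//   CheckMap(a0, t0, isolate()->factory()->heap_number_map(),
//            &not_heap_number, DO_SMI_CHECK);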
3553 
3554 
3555 void MacroAssembler::DispatchMap(Register obj,
3556                                  Register scratch,
3557                                  Handle<Map> map,
3558                                  Handle<Code> success,
3559                                  SmiCheckType smi_check_type) {
3560   Label fail;
3561   if (smi_check_type == DO_SMI_CHECK) {
3562     JumpIfSmi(obj, &fail);
3563   }
3564   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3565   Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3566   bind(&fail);
3567 }
3568 
3569 
3570 void MacroAssembler::CheckMap(Register obj,
3571                               Register scratch,
3572                               Heap::RootListIndex index,
3573                               Label* fail,
3574                               SmiCheckType smi_check_type) {
3575   if (smi_check_type == DO_SMI_CHECK) {
3576     JumpIfSmi(obj, fail);
3577   }
3578   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3579   LoadRoot(at, index);
3580   Branch(fail, ne, scratch, Operand(at));
3581 }
3582 
3583 
3584 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3585   if (IsMipsSoftFloatABI) {
3586     if (kArchEndian == kLittle) {
3587       Move(dst, v0, v1);
3588     } else {
3589       Move(dst, v1, v0);
3590     }
3591   } else {
3592     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
3593   }
3594 }
3595 
3596 
3597 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3598   if (IsMipsSoftFloatABI) {
3599     if (kArchEndian == kLittle) {
3600       Move(dst, a0, a1);
3601     } else {
3602       Move(dst, a1, a0);
3603     }
3604   } else {
3605     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
3606   }
3607 }
3608 
3609 
3610 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3611   if (!IsMipsSoftFloatABI) {
3612     Move(f12, src);
3613   } else {
3614     if (kArchEndian == kLittle) {
3615       Move(a0, a1, src);
3616     } else {
3617       Move(a1, a0, src);
3618     }
3619   }
3620 }
3621 
3622 
3623 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3624   if (!IsMipsSoftFloatABI) {
3625     Move(f0, src);
3626   } else {
3627     if (kArchEndian == kLittle) {
3628       Move(v0, v1, src);
3629     } else {
3630       Move(v1, v0, src);
3631     }
3632   }
3633 }
3634 
3635 
3636 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3637                                           DoubleRegister src2) {
3638   if (!IsMipsSoftFloatABI) {
3639     if (src2.is(f12)) {
3640       ASSERT(!src1.is(f14));
3641       Move(f14, src2);
3642       Move(f12, src1);
3643     } else {
3644       Move(f12, src1);
3645       Move(f14, src2);
3646     }
3647   } else {
3648     if (kArchEndian == kLittle) {
3649       Move(a0, a1, src1);
3650       Move(a2, a3, src2);
3651     } else {
3652       Move(a1, a0, src1);
3653       Move(a3, a2, src2);
3654     }
3655   }
3656 }
3657 
3658 
3659 // -----------------------------------------------------------------------------
3660 // JavaScript invokes.
3661 
3662 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3663                                     const ParameterCount& actual,
3664                                     Handle<Code> code_constant,
3665                                     Register code_reg,
3666                                     Label* done,
3667                                     bool* definitely_mismatches,
3668                                     InvokeFlag flag,
3669                                     const CallWrapper& call_wrapper) {
3670   bool definitely_matches = false;
3671   *definitely_mismatches = false;
3672   Label regular_invoke;
3673 
3674   // Check whether the expected and actual argument counts match. If not,
3675   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3676   //  a0: actual arguments count
3677   //  a1: function (passed through to callee)
3678   //  a2: expected arguments count
3679 
3680   // The code below is made a lot easier because the calling code already sets
3681   // up actual and expected registers according to the contract if values are
3682   // passed in registers.
3683   ASSERT(actual.is_immediate() || actual.reg().is(a0));
3684   ASSERT(expected.is_immediate() || expected.reg().is(a2));
3685   ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3686 
3687   if (expected.is_immediate()) {
3688     ASSERT(actual.is_immediate());
3689     if (expected.immediate() == actual.immediate()) {
3690       definitely_matches = true;
3691     } else {
3692       li(a0, Operand(actual.immediate()));
3693       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3694       if (expected.immediate() == sentinel) {
3695         // Don't worry about adapting arguments for builtins that
3696         // don't want that done. Skip the adaptation code by making it look
3697         // like we have a match between expected and actual number of
3698         // arguments.
3699         definitely_matches = true;
3700       } else {
3701         *definitely_mismatches = true;
3702         li(a2, Operand(expected.immediate()));
3703       }
3704     }
3705   } else if (actual.is_immediate()) {
3706     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3707     li(a0, Operand(actual.immediate()));
3708   } else {
3709     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3710   }
3711 
3712   if (!definitely_matches) {
3713     if (!code_constant.is_null()) {
3714       li(a3, Operand(code_constant));
3715       addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3716     }
3717 
3718     Handle<Code> adaptor =
3719         isolate()->builtins()->ArgumentsAdaptorTrampoline();
3720     if (flag == CALL_FUNCTION) {
3721       call_wrapper.BeforeCall(CallSize(adaptor));
3722       Call(adaptor);
3723       call_wrapper.AfterCall();
3724       if (!*definitely_mismatches) {
3725         Branch(done);
3726       }
3727     } else {
3728       Jump(adaptor, RelocInfo::CODE_TARGET);
3729     }
3730     bind(&regular_invoke);
3731   }
3732 }
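
// The adaptor contract above, with concrete numbers (annotation, not part
// of the original source). For a call passing two arguments to a function
// declared with four formal parameters:
//
//   a0 = 2        // actual argument count
//   a1 = callee   // JSFunction, passed through
//   a2 = 4        // expected parameter count
//
// ArgumentsAdaptorTrampoline then pads the frame with undefined for the two
// missing arguments before entering the callee's code.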
3733 
3734 
3735 void MacroAssembler::InvokeCode(Register code,
3736                                 const ParameterCount& expected,
3737                                 const ParameterCount& actual,
3738                                 InvokeFlag flag,
3739                                 const CallWrapper& call_wrapper) {
3740   // You can't call a function without a valid frame.
3741   ASSERT(flag == JUMP_FUNCTION || has_frame());
3742 
3743   Label done;
3744 
3745   bool definitely_mismatches = false;
3746   InvokePrologue(expected, actual, Handle<Code>::null(), code,
3747                  &done, &definitely_mismatches, flag,
3748                  call_wrapper);
3749   if (!definitely_mismatches) {
3750     if (flag == CALL_FUNCTION) {
3751       call_wrapper.BeforeCall(CallSize(code));
3752       Call(code);
3753       call_wrapper.AfterCall();
3754     } else {
3755       ASSERT(flag == JUMP_FUNCTION);
3756       Jump(code);
3757     }
3758     // Continue here if InvokePrologue does handle the invocation due to
3759     // mismatched parameter counts.
3760     bind(&done);
3761   }
3762 }
3763 
3764 
3765 void MacroAssembler::InvokeFunction(Register function,
3766                                     const ParameterCount& actual,
3767                                     InvokeFlag flag,
3768                                     const CallWrapper& call_wrapper) {
3769   // You can't call a function without a valid frame.
3770   ASSERT(flag == JUMP_FUNCTION || has_frame());
3771 
3772   // Contract with called JS functions requires that function is passed in a1.
3773   ASSERT(function.is(a1));
3774   Register expected_reg = a2;
3775   Register code_reg = a3;
3776 
3777   lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3778   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3779   lw(expected_reg,
3780       FieldMemOperand(code_reg,
3781                       SharedFunctionInfo::kFormalParameterCountOffset));
3782   sra(expected_reg, expected_reg, kSmiTagSize);
3783   lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3784 
3785   ParameterCount expected(expected_reg);
3786   InvokeCode(code_reg, expected, actual, flag, call_wrapper);
3787 }
3788 
3789 
3790 void MacroAssembler::InvokeFunction(Register function,
3791                                     const ParameterCount& expected,
3792                                     const ParameterCount& actual,
3793                                     InvokeFlag flag,
3794                                     const CallWrapper& call_wrapper) {
3795   // You can't call a function without a valid frame.
3796   ASSERT(flag == JUMP_FUNCTION || has_frame());
3797 
3798   // Contract with called JS functions requires that function is passed in a1.
3799   ASSERT(function.is(a1));
3800 
3801   // Get the function and setup the context.
3802   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3803 
3804   // We call indirectly through the code field in the function to
3805   // allow recompilation to take effect without changing any of the
3806   // call sites.
3807   lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3808   InvokeCode(a3, expected, actual, flag, call_wrapper);
3809 }
3810 
3811 
3812 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3813                                     const ParameterCount& expected,
3814                                     const ParameterCount& actual,
3815                                     InvokeFlag flag,
3816                                     const CallWrapper& call_wrapper) {
3817   li(a1, function);
3818   InvokeFunction(a1, expected, actual, flag, call_wrapper);
3819 }
3820 
3821 
3822 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3823                                           Register map,
3824                                           Register scratch,
3825                                           Label* fail) {
3826   lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3827   IsInstanceJSObjectType(map, scratch, fail);
3828 }
3829 
3830 
3831 void MacroAssembler::IsInstanceJSObjectType(Register map,
3832                                             Register scratch,
3833                                             Label* fail) {
3834   lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3835   Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3836   Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3837 }
3838 
3839 
3840 void MacroAssembler::IsObjectJSStringType(Register object,
3841                                           Register scratch,
3842                                           Label* fail) {
3843   ASSERT(kNotStringTag != 0);
3844 
3845   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3846   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3847   And(scratch, scratch, Operand(kIsNotStringMask));
3848   Branch(fail, ne, scratch, Operand(zero_reg));
3849 }
3850 
3851 
3852 void MacroAssembler::IsObjectNameType(Register object,
3853                                       Register scratch,
3854                                       Label* fail) {
3855   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3856   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3857   Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3858 }
3859 
3860 
3861 // ---------------------------------------------------------------------------
3862 // Support functions.
3863 
3864 
3865 void MacroAssembler::TryGetFunctionPrototype(Register function,
3866                                              Register result,
3867                                              Register scratch,
3868                                              Label* miss,
3869                                              bool miss_on_bound_function) {
3870   // Check that the receiver isn't a smi.
3871   JumpIfSmi(function, miss);
3872 
3873   // Check that the function really is a function.  Load map into result reg.
3874   GetObjectType(function, result, scratch);
3875   Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3876 
3877   if (miss_on_bound_function) {
3878     lw(scratch,
3879        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3880     lw(scratch,
3881        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3882     And(scratch, scratch,
3883         Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3884     Branch(miss, ne, scratch, Operand(zero_reg));
3885   }
3886 
3887   // Make sure that the function has an instance prototype.
3888   Label non_instance;
3889   lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3890   And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3891   Branch(&non_instance, ne, scratch, Operand(zero_reg));
3892 
3893   // Get the prototype or initial map from the function.
3894   lw(result,
3895      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3896 
3897   // If the prototype or initial map is the hole, don't return it and
3898   // simply miss the cache instead. This will allow us to allocate a
3899   // prototype object on-demand in the runtime system.
3900   LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3901   Branch(miss, eq, result, Operand(t8));
3902 
3903   // If the function does not have an initial map, we're done.
3904   Label done;
3905   GetObjectType(result, scratch, scratch);
3906   Branch(&done, ne, scratch, Operand(MAP_TYPE));
3907 
3908   // Get the prototype from the initial map.
3909   lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3910   jmp(&done);
3911 
3912   // Non-instance prototype: Fetch prototype from constructor field
3913   // in initial map.
3914   bind(&non_instance);
3915   lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3916 
3917   // All done.
3918   bind(&done);
3919 }
3920 
3921 
3922 void MacroAssembler::GetObjectType(Register object,
3923                                    Register map,
3924                                    Register type_reg) {
3925   lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3926   lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3927 }
3928 
3929 
3930 // -----------------------------------------------------------------------------
3931 // Runtime calls.
3932 
3933 void MacroAssembler::CallStub(CodeStub* stub,
3934                               TypeFeedbackId ast_id,
3935                               Condition cond,
3936                               Register r1,
3937                               const Operand& r2,
3938                               BranchDelaySlot bd) {
3939   ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
3940   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
3941        cond, r1, r2, bd);
3942 }
3943 
3944 
3945 void MacroAssembler::TailCallStub(CodeStub* stub,
3946                                   Condition cond,
3947                                   Register r1,
3948                                   const Operand& r2,
3949                                   BranchDelaySlot bd) {
3950   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
3951 }
3952 
3953 
3954 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3955   return ref0.address() - ref1.address();
3956 }
3957 
3958 
3959 void MacroAssembler::CallApiFunctionAndReturn(
3960     Register function_address,
3961     ExternalReference thunk_ref,
3962     int stack_space,
3963     MemOperand return_value_operand,
3964     MemOperand* context_restore_operand) {
3965   ExternalReference next_address =
3966       ExternalReference::handle_scope_next_address(isolate());
3967   const int kNextOffset = 0;
3968   const int kLimitOffset = AddressOffset(
3969       ExternalReference::handle_scope_limit_address(isolate()),
3970       next_address);
3971   const int kLevelOffset = AddressOffset(
3972       ExternalReference::handle_scope_level_address(isolate()),
3973       next_address);
3974 
3975   ASSERT(function_address.is(a1) || function_address.is(a2));
3976 
3977   Label profiler_disabled;
3978   Label end_profiler_check;
3979   li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
3980   lb(t9, MemOperand(t9, 0));
3981   Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
3982 
3983   // Additional parameter is the address of the actual callback.
3984   li(t9, Operand(thunk_ref));
3985   jmp(&end_profiler_check);
3986 
3987   bind(&profiler_disabled);
3988   mov(t9, function_address);
3989   bind(&end_profiler_check);
3990 
3991   // Allocate HandleScope in callee-save registers.
3992   li(s3, Operand(next_address));
3993   lw(s0, MemOperand(s3, kNextOffset));
3994   lw(s1, MemOperand(s3, kLimitOffset));
3995   lw(s2, MemOperand(s3, kLevelOffset));
3996   Addu(s2, s2, Operand(1));
3997   sw(s2, MemOperand(s3, kLevelOffset));
3998 
3999   if (FLAG_log_timer_events) {
4000     FrameScope frame(this, StackFrame::MANUAL);
4001     PushSafepointRegisters();
4002     PrepareCallCFunction(1, a0);
4003     li(a0, Operand(ExternalReference::isolate_address(isolate())));
4004     CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
4005     PopSafepointRegisters();
4006   }
4007 
4008   // Native call returns to the DirectCEntry stub which redirects to the
4009   // return address pushed on stack (could have moved after GC).
4010   // DirectCEntry stub itself is generated early and never moves.
4011   DirectCEntryStub stub(isolate());
4012   stub.GenerateCall(this, t9);
4013 
4014   if (FLAG_log_timer_events) {
4015     FrameScope frame(this, StackFrame::MANUAL);
4016     PushSafepointRegisters();
4017     PrepareCallCFunction(1, a0);
4018     li(a0, Operand(ExternalReference::isolate_address(isolate())));
4019     CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
4020     PopSafepointRegisters();
4021   }
4022 
4023   Label promote_scheduled_exception;
4024   Label exception_handled;
4025   Label delete_allocated_handles;
4026   Label leave_exit_frame;
4027   Label return_value_loaded;
4028 
4029   // Load value from ReturnValue.
4030   lw(v0, return_value_operand);
4031   bind(&return_value_loaded);
4032 
4033   // No more valid handles (the result handle was the last one). Restore
4034   // previous handle scope.
4035   sw(s0, MemOperand(s3, kNextOffset));
4036   if (emit_debug_code()) {
4037     lw(a1, MemOperand(s3, kLevelOffset));
4038     Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4039   }
4040   Subu(s2, s2, Operand(1));
4041   sw(s2, MemOperand(s3, kLevelOffset));
4042   lw(at, MemOperand(s3, kLimitOffset));
4043   Branch(&delete_allocated_handles, ne, s1, Operand(at));
4044 
4045   // Check if the function scheduled an exception.
4046   bind(&leave_exit_frame);
4047   LoadRoot(t0, Heap::kTheHoleValueRootIndex);
4048   li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
4049   lw(t1, MemOperand(at));
4050   Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
4051   bind(&exception_handled);
4052 
4053   bool restore_context = context_restore_operand != NULL;
4054   if (restore_context) {
4055     lw(cp, *context_restore_operand);
4056   }
4057   li(s0, Operand(stack_space));
4058   LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4059 
4060   bind(&promote_scheduled_exception);
4061   {
4062     FrameScope frame(this, StackFrame::INTERNAL);
4063     CallExternalReference(
4064         ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
4065         0);
4066   }
4067   jmp(&exception_handled);
4068 
4069   // HandleScope limit has changed. Delete allocated extensions.
4070   bind(&delete_allocated_handles);
4071   sw(s1, MemOperand(s3, kLimitOffset));
4072   mov(s0, v0);
4073   mov(a0, v0);
4074   PrepareCallCFunction(1, s1);
4075   li(a0, Operand(ExternalReference::isolate_address(isolate())));
4076   CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4077       1);
4078   mov(v0, s0);
4079   jmp(&leave_exit_frame);
4080 }
4081 
4082 
4083 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4084   return has_frame_ || !stub->SometimesSetsUpAFrame();
4085 }
4086 
4087 
4088 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4089   // If the hash field contains an array index pick it out. The assert checks
4090   // that the constants for the maximum number of digits for an array index
4091   // cached in the hash field and the number of bits reserved for it do not
4092   // conflict.
4093   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4094          (1 << String::kArrayIndexValueBits));
4095   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4096 }
4097 
4098 
4099 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4100                                                FPURegister result,
4101                                                Register scratch1,
4102                                                Register scratch2,
4103                                                Register heap_number_map,
4104                                                Label* not_number,
4105                                                ObjectToDoubleFlags flags) {
4106   Label done;
4107   if ((flags & OBJECT_NOT_SMI) == 0) {
4108     Label not_smi;
4109     JumpIfNotSmi(object, &not_smi);
4110     // Remove smi tag and convert to double.
4111     sra(scratch1, object, kSmiTagSize);
4112     mtc1(scratch1, result);
4113     cvt_d_w(result, result);
4114     Branch(&done);
4115     bind(&not_smi);
4116   }
4117   // Check for heap number and load double value from it.
4118   lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4119   Branch(not_number, ne, scratch1, Operand(heap_number_map));
4120 
4121   if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4122     // If exponent is all ones the number is either a NaN or +/-Infinity.
4123     Register exponent = scratch1;
4124     Register mask_reg = scratch2;
4125     lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4126     li(mask_reg, HeapNumber::kExponentMask);
4127 
4128     And(exponent, exponent, mask_reg);
4129     Branch(not_number, eq, exponent, Operand(mask_reg));
4130   }
4131   ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4132   bind(&done);
4133 }
4134 
4135 
4136 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4137                                             FPURegister value,
4138                                             Register scratch1) {
4139   sra(scratch1, smi, kSmiTagSize);
4140   mtc1(scratch1, value);
4141   cvt_d_w(value, value);
4142 }
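
// Worked example (annotation, not part of the original source). With
// kSmiTagSize == 1, the smi 5 is encoded as 5 << 1 == 10:
//
//   sra      -> 5    // drop the tag, keeping the sign
//   mtc1     -> 5 moved to the FPU as an integer bit pattern
//   cvt_d_w  -> 5.0  // convert word to double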
4143 
4144 
4145 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4146                                              Register left,
4147                                              Register right,
4148                                              Register overflow_dst,
4149                                              Register scratch) {
4150   ASSERT(!dst.is(overflow_dst));
4151   ASSERT(!dst.is(scratch));
4152   ASSERT(!overflow_dst.is(scratch));
4153   ASSERT(!overflow_dst.is(left));
4154   ASSERT(!overflow_dst.is(right));
4155 
4156   if (left.is(right) && dst.is(left)) {
4157     ASSERT(!dst.is(t9));
4158     ASSERT(!scratch.is(t9));
4159     ASSERT(!left.is(t9));
4160     ASSERT(!right.is(t9));
4161     ASSERT(!overflow_dst.is(t9));
4162     mov(t9, right);
4163     right = t9;
4164   }
4165 
4166   if (dst.is(left)) {
4167     mov(scratch, left);  // Preserve left.
4168     addu(dst, left, right);  // Left is overwritten.
4169     xor_(scratch, dst, scratch);  // Original left.
4170     xor_(overflow_dst, dst, right);
4171     and_(overflow_dst, overflow_dst, scratch);
4172   } else if (dst.is(right)) {
4173     mov(scratch, right);  // Preserve right.
4174     addu(dst, left, right);  // Right is overwritten.
4175     xor_(scratch, dst, scratch);  // Original right.
4176     xor_(overflow_dst, dst, left);
4177     and_(overflow_dst, overflow_dst, scratch);
4178   } else {
4179     addu(dst, left, right);
4180     xor_(overflow_dst, dst, left);
4181     xor_(scratch, dst, right);
4182     and_(overflow_dst, scratch, overflow_dst);
4183   }
4184 }
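
// Why the xor trick above works (annotation, not part of the original
// source): dst = left + right overflows exactly when left and right have
// the same sign but dst's sign differs, i.e. when both (dst ^ left) and
// (dst ^ right) have their sign bits set. Example:
//
//   left = 0x7fffffff, right = 0x00000001
//   dst  = 0x80000000 (overflowed)
//   dst ^ left = 0xffffffff, dst ^ right = 0x80000001
//   overflow_dst = 0x80000001 -> sign bit set, overflow detected.
//
// A caller would then test the sign bit, e.g. (registers illustrative):
//
//   AdduAndCheckForOverflow(v0, a0, a1, t0, t1);
//   Branch(&overflow_label, lt, t0, Operand(zero_reg));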
4185 
4186 
4187 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4188                                              Register left,
4189                                              Register right,
4190                                              Register overflow_dst,
4191                                              Register scratch) {
4192   ASSERT(!dst.is(overflow_dst));
4193   ASSERT(!dst.is(scratch));
4194   ASSERT(!overflow_dst.is(scratch));
4195   ASSERT(!overflow_dst.is(left));
4196   ASSERT(!overflow_dst.is(right));
4197   ASSERT(!scratch.is(left));
4198   ASSERT(!scratch.is(right));
4199 
4200   // This happens with some crankshaft code. Since Subu works fine if
4201   // left == right, let's not make that restriction here.
4202   if (left.is(right)) {
4203     mov(dst, zero_reg);
4204     mov(overflow_dst, zero_reg);
4205     return;
4206   }
4207 
4208   if (dst.is(left)) {
4209     mov(scratch, left);  // Preserve left.
4210     subu(dst, left, right);  // Left is overwritten.
4211     xor_(overflow_dst, dst, scratch);  // scratch is original left.
4212     xor_(scratch, scratch, right);  // Original left ^ right.
4213     and_(overflow_dst, scratch, overflow_dst);
4214   } else if (dst.is(right)) {
4215     mov(scratch, right);  // Preserve right.
4216     subu(dst, left, right);  // Right is overwritten.
4217     xor_(overflow_dst, dst, left);
4218     xor_(scratch, left, scratch);  // Original right.
4219     and_(overflow_dst, scratch, overflow_dst);
4220   } else {
4221     subu(dst, left, right);
4222     xor_(overflow_dst, dst, left);
4223     xor_(scratch, left, right);
4224     and_(overflow_dst, scratch, overflow_dst);
4225   }
4226 }
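
// The subtraction analogue (annotation, not part of the original source):
// dst = left - right overflows exactly when left and right differ in sign
// and dst's sign differs from left's, i.e. when (dst ^ left) & (left ^ right)
// has its sign bit set. Example:
//
//   left = 0x80000000 (INT_MIN), right = 0x00000001
//   dst  = 0x7fffffff (overflowed)
//   dst ^ left = 0xffffffff, left ^ right = 0x80000001
//   overflow_dst = 0x80000001 -> sign bit set, overflow detected.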
4227 
4228 
4229 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4230                                  int num_arguments,
4231                                  SaveFPRegsMode save_doubles) {
4232   // All parameters are on the stack. v0 has the return value after call.
4233 
4234   // If the expected number of arguments of the runtime function is
4235   // constant, we check that the actual number of arguments match the
4236   // expectation.
4237   CHECK(f->nargs < 0 || f->nargs == num_arguments);
4238 
4239   // TODO(1236192): Most runtime routines don't need the number of
4240   // arguments passed in because it is constant. At some point we
4241   // should remove this need and make the runtime routine entry code
4242   // smarter.
4243   PrepareCEntryArgs(num_arguments);
4244   PrepareCEntryFunction(ExternalReference(f, isolate()));
4245   CEntryStub stub(isolate(), 1, save_doubles);
4246   CallStub(&stub);
4247 }
4248 
4249 
4250 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4251                                            int num_arguments,
4252                                            BranchDelaySlot bd) {
4253   PrepareCEntryArgs(num_arguments);
4254   PrepareCEntryFunction(ext);
4255 
4256   CEntryStub stub(isolate(), 1);
4257   CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4258 }
4259 
4260 
4261 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4262                                                int num_arguments,
4263                                                int result_size) {
4264   // TODO(1236192): Most runtime routines don't need the number of
4265   // arguments passed in because it is constant. At some point we
4266   // should remove this need and make the runtime routine entry code
4267   // smarter.
4268   PrepareCEntryArgs(num_arguments);
4269   JumpToExternalReference(ext);
4270 }
4271 
4272 
4273 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4274                                      int num_arguments,
4275                                      int result_size) {
4276   TailCallExternalReference(ExternalReference(fid, isolate()),
4277                             num_arguments,
4278                             result_size);
4279 }
4280 
4281 
4282 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4283                                              BranchDelaySlot bd) {
4284   PrepareCEntryFunction(builtin);
4285   CEntryStub stub(isolate(), 1);
4286   Jump(stub.GetCode(),
4287        RelocInfo::CODE_TARGET,
4288        al,
4289        zero_reg,
4290        Operand(zero_reg),
4291        bd);
4292 }
4293 
4294 
4295 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4296                                    InvokeFlag flag,
4297                                    const CallWrapper& call_wrapper) {
4298   // You can't call a builtin without a valid frame.
4299   ASSERT(flag == JUMP_FUNCTION || has_frame());
4300 
4301   GetBuiltinEntry(t9, id);
4302   if (flag == CALL_FUNCTION) {
4303     call_wrapper.BeforeCall(CallSize(t9));
4304     Call(t9);
4305     call_wrapper.AfterCall();
4306   } else {
4307     ASSERT(flag == JUMP_FUNCTION);
4308     Jump(t9);
4309   }
4310 }
4311 
4312 
4313 void MacroAssembler::GetBuiltinFunction(Register target,
4314                                         Builtins::JavaScript id) {
4315   // Load the builtins object into target register.
4316   lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4317   lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4318   // Load the JavaScript builtin function from the builtins object.
4319   lw(target, FieldMemOperand(target,
4320                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4321 }
4322 
4323 
4324 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4325   ASSERT(!target.is(a1));
4326   GetBuiltinFunction(a1, id);
4327   // Load the code entry point from the builtins object.
4328   lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4329 }
4330 
4331 
4332 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4333                                 Register scratch1, Register scratch2) {
4334   if (FLAG_native_code_counters && counter->Enabled()) {
4335     li(scratch1, Operand(value));
4336     li(scratch2, Operand(ExternalReference(counter)));
4337     sw(scratch1, MemOperand(scratch2));
4338   }
4339 }
4340 
4341 
4342 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4343                                       Register scratch1, Register scratch2) {
4344   ASSERT(value > 0);
4345   if (FLAG_native_code_counters && counter->Enabled()) {
4346     li(scratch2, Operand(ExternalReference(counter)));
4347     lw(scratch1, MemOperand(scratch2));
4348     Addu(scratch1, scratch1, Operand(value));
4349     sw(scratch1, MemOperand(scratch2));
4350   }
4351 }
4352 
4353 
4354 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4355                                       Register scratch1, Register scratch2) {
4356   ASSERT(value > 0);
4357   if (FLAG_native_code_counters && counter->Enabled()) {
4358     li(scratch2, Operand(ExternalReference(counter)));
4359     lw(scratch1, MemOperand(scratch2));
4360     Subu(scratch1, scratch1, Operand(value));
4361     sw(scratch1, MemOperand(scratch2));
4362   }
4363 }
4364 
4365 
4366 // -----------------------------------------------------------------------------
4367 // Debugging.
4368 
4369 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4370                             Register rs, Operand rt) {
4371   if (emit_debug_code())
4372     Check(cc, reason, rs, rt);
4373 }
4374 
4375 
4376 void MacroAssembler::AssertFastElements(Register elements) {
4377   if (emit_debug_code()) {
4378     ASSERT(!elements.is(at));
4379     Label ok;
4380     push(elements);
4381     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4382     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4383     Branch(&ok, eq, elements, Operand(at));
4384     LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4385     Branch(&ok, eq, elements, Operand(at));
4386     LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4387     Branch(&ok, eq, elements, Operand(at));
4388     Abort(kJSObjectWithFastElementsMapHasSlowElements);
4389     bind(&ok);
4390     pop(elements);
4391   }
4392 }
4393 
4394 
4395 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4396                            Register rs, Operand rt) {
4397   Label L;
4398   Branch(&L, cc, rs, rt);
4399   Abort(reason);
4400   // Will not return here.
4401   bind(&L);
4402 }
4403 
4404 
4405 void MacroAssembler::Abort(BailoutReason reason) {
4406   Label abort_start;
4407   bind(&abort_start);
4408 #ifdef DEBUG
4409   const char* msg = GetBailoutReason(reason);
4410   if (msg != NULL) {
4411     RecordComment("Abort message: ");
4412     RecordComment(msg);
4413   }
4414 
4415   if (FLAG_trap_on_abort) {
4416     stop(msg);
4417     return;
4418   }
4419 #endif
4420 
4421   li(a0, Operand(Smi::FromInt(reason)));
4422   push(a0);
4423   // Disable stub call restrictions to always allow calls to abort.
4424   if (!has_frame_) {
4425     // We don't actually want to generate a pile of code for this, so just
4426     // claim there is a stack frame, without generating one.
4427     FrameScope scope(this, StackFrame::NONE);
4428     CallRuntime(Runtime::kAbort, 1);
4429   } else {
4430     CallRuntime(Runtime::kAbort, 1);
4431   }
4432   // Will not return here.
4433   if (is_trampoline_pool_blocked()) {
4434     // If the calling code cares about the exact number of
4435     // instructions generated, we insert padding here to keep the size
4436     // of the Abort macro constant.
4437     // Currently in debug mode with debug_code enabled the number of
4438     // generated instructions is 10, so we use this as a maximum value.
4439     static const int kExpectedAbortInstructions = 10;
4440     int abort_instructions = InstructionsGeneratedSince(&abort_start);
4441     ASSERT(abort_instructions <= kExpectedAbortInstructions);
4442     while (abort_instructions++ < kExpectedAbortInstructions) {
4443       nop();
4444     }
4445   }
4446 }
4447 
4448 
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    Move(dst, cp);
  }
}
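
// Illustration: a call such as LoadContext(a0, 2) emits two dependent loads,
//   lw(a0, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
//   lw(a0, MemOperand(a0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
// leaving a0 (an arbitrary example register) pointing at the context two
// levels up from cp.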


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  lw(scratch,
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  lw(scratch,
     MemOperand(scratch,
                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  lw(at, FieldMemOperand(scratch, offset));
  Branch(no_map_match, ne, map_in_out, Operand(at));

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  lw(map_in_out, FieldMemOperand(scratch, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  lw(function,
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  lw(function, FieldMemOperand(function,
                               GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  lw(function, MemOperand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


void MacroAssembler::StubPrologue() {
  Push(ra, fp, cp);
  Push(Smi::FromInt(StackFrame::STUB));
  // Adjust FP to point to saved FP.
  Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictable_code_size_scope(
      this, kNoCodeAgeSequenceLength);
  // The following three instructions must remain together and unmodified
  // for code aging to work properly.
  if (code_pre_aging) {
    // Pre-age the code.
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address to t9 and call it;
    // GetCodeAgeAndParity() extracts the stub address from this instruction.
    li(t9,
       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
       CONSTANT_SIZE);
    nop();  // Prevent jalr to jal optimization.
    jalr(t9, a0);
    nop();  // Branch delay slot nop.
    nop();  // Pad the empty space.
  } else {
    Push(ra, fp, cp, a1);
    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
    // Adjust fp to point to caller's fp.
    Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  addiu(sp, sp, -5 * kPointerSize);
  li(t8, Operand(Smi::FromInt(type)));
  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  sw(cp, MemOperand(sp, 2 * kPointerSize));
  sw(t8, MemOperand(sp, 1 * kPointerSize));
  sw(t9, MemOperand(sp, 0 * kPointerSize));
  // Adjust FP to point to saved FP.
  Addu(fp, sp,
       Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
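
// After EnterFrame the frame looks like this (one pointer per slot):
//   [fp + 1 * kPointerSize]  saved ra
//   [fp + 0 * kPointerSize]  caller's fp
//   [fp - 1 * kPointerSize]  cp
//   [fp - 2 * kPointerSize]  frame type marker (Smi)
//   [fp - 3 * kPointerSize]  code object          <-- sp
// which lines up with the StandardFrameConstants offsets used elsewhere.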


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}


void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    int stack_space) {
  // Set up the frame structure on the stack.
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);

  // This is how the stack will look:
  // fp + 2 (==kCallerSPDisplacement) - old stack's end
  // [fp + 1 (==kCallerPCOffset)] - saved old ra
  // [fp + 0 (==kCallerFPOffset)] - saved old fp
  // [fp - 1 (==kSPOffset)] - sp of the called function
  // [fp - 2 (==kCodeOffset)] - CodeObject
  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
  //   new stack (will contain saved ra)

  // Save registers.
  addiu(sp, sp, -4 * kPointerSize);
  sw(ra, MemOperand(sp, 3 * kPointerSize));
  sw(fp, MemOperand(sp, 2 * kPointerSize));
  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.

  if (emit_debug_code()) {
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }

  // Accessed from ExitFrame::code_slot.
  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(fp, MemOperand(t8));
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(cp, MemOperand(t8));

  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
    ASSERT(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      ASSERT(IsPowerOf2(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }

  // Reserve place for the return address, stack space and an optional slot
  // (used by the DirectCEntryStub to hold the return value if a struct is
  // returned) and align the frame preparing for calling the runtime function.
  ASSERT(stack_space >= 0);
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  addiu(at, sp, kPointerSize);
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count,
                                    bool restore_context,
                                    bool do_return) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }

  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(zero_reg, MemOperand(t8));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    lw(cp, MemOperand(t8));
  }
#ifdef DEBUG
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(a3, MemOperand(t8));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, fp);  // Respect ABI stack constraint.
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));

  if (argument_count.is_valid()) {
    sll(t8, argument_count, kPointerSizeLog2);
    addu(sp, sp, t8);
  }

  if (do_return) {
    Ret(USE_DELAY_SLOT);
    // If returning, the instruction in the delay slot will be the addiu below.
  }
  addiu(sp, sp, 8);
}

void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  sll(scratch1, length, kSmiTagSize);
  LoadRoot(scratch2, map_index);
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one Mips
  // platform for another Mips platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_MIPS
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_MIPS
}

void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}

void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}
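
// The sequence above relies on the identity x & (x - 1) clearing the lowest
// set bit: the result is zero exactly when x is a power of two. For example,
// 8 & 7 == 0 (power of two) while 6 & 5 == 4 != 0 (not a power of two). The
// first branch, taken when reg - 1 is negative, filters out zero (and any
// value with the sign bit set) before the AND is tested.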


void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  ASSERT(!reg.is(overflow));
  mov(overflow, reg);  // Save original value.
  SmiTag(reg);
  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
}
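
// SmiTag shifts the value left by one, so tagging overflows exactly when the
// shift changes the sign bit, making (value ^ (value << 1)) negative. For
// example, value 0x40000000 tags to 0x80000000 and the xor 0xC0000000 has
// the sign bit set (overflow), while value 1 tags to 2 with xor 3 (no
// overflow).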


void MacroAssembler::SmiTagCheckOverflow(Register dst,
                                         Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    ASSERT(!dst.is(src));
    ASSERT(!dst.is(overflow));
    ASSERT(!src.is(overflow));
    SmiTag(dst, src);
    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
  }
}


void MacroAssembler::UntagAndJumpIfSmi(Register dst,
                                       Register src,
                                       Label* smi_case) {
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
                                          Register src,
                                          Label* non_smi_case) {
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
                               BranchDelaySlot bd) {
  ASSERT_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfNotSmi(Register value,
                                  Label* not_smi_label,
                                  Register scratch,
                                  BranchDelaySlot bd) {
  ASSERT_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  JumpIfNotSmi(at, on_not_both_smi);
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  // The AND of the tag bits is 0 if either operand is a smi; to fall
  // through, both tag bits must be 1 (neither operand a smi).
  and_(at, reg1, reg2);
  JumpIfSmi(at, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t0);
    Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
    push(object);
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
    pop(object);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t0);
    Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
    push(object);
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
    pop(object);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    Branch(&done_checking, eq, object, Operand(scratch));
    push(object);
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
    pop(object);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    ASSERT(!reg.is(at));
    LoadRoot(at, index);
    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  sra(mask, mask, kSmiTagSize + 1);
  Addu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  Addu(scratch1,
       object,
       Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  lw(scratch2, MemOperand(scratch1, kPointerSize));
  lw(scratch1, MemOperand(scratch1, 0));
  Xor(scratch1, scratch1, Operand(scratch2));
  And(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  sll(scratch1, scratch1, kPointerSizeLog2 + 1);
  Addu(scratch1, number_string_cache, scratch1);

  Register probe = mask;
  lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
  Branch(not_found);

  bind(&is_smi);
  Register scratch = scratch1;
  sra(scratch, object, 1);   // Shift away the tag.
  And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  sll(scratch, scratch, kPointerSizeLog2 + 1);
  Addu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  bind(&load_result_from_cache);
  lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  IncrementCounter(isolate()->counters()->number_to_string_native(),
                   1,
                   scratch1,
                   scratch2);
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatAsciiStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
  andi(scratch2, second, kFlatAsciiStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
}
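
// The mask/tag pair tests three instance-type fields at once: only an object
// that is a string (kStringTag), one-byte encoded (kOneByteStringTag) and
// sequential (kSeqStringTag) matches kFlatAsciiStringTag under the mask; any
// cons, sliced, external or two-byte string branches to failure.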


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, Operand(kFlatAsciiStringMask));
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}


static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four simple arguments are passed in registers a0..a3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}
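
// Worked example under the MIPS O32 ABI, where kCArgSlotCount is 4: three
// register arguments plus two doubles count as 3 + 2 * 2 = 7 argument words,
// of which 7 - 4 = 3 spill to the stack; adding the 4 reserved argument
// slots gives 7 stack words in total.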


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               Register scratch,
                                               uint32_t encoding_mask) {
  Label is_object;
  SmiTst(string, at);
  Check(ne, kNonObject, at, Operand(zero_reg));

  lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
  li(scratch, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType, at, Operand(scratch));

  // The index is assumed to arrive untagged. Tag it to compare with the
  // string length without using a temp register; it is untagged again at
  // the end of this function.
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, scratch, &index_tag_bad);
  Branch(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);

  lw(at, FieldMemOperand(string, String::kLengthOffset));
  Check(lt, kIndexIsTooLarge, index, Operand(at));

  ASSERT(Smi::FromInt(0) == 0);
  Check(ge, kIndexIsNegative, index, Operand(zero_reg));

  SmiUntag(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  ASSERT(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, per the MIPS
  // ABI.

#if V8_HOST_ARCH_MIPS
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


#undef BRANCH_ARGS_CHECK


void MacroAssembler::PatchRelocatedValue(Register li_location,
                                         Register scratch,
                                         Register new_value) {
  lw(scratch, MemOperand(li_location));
  // At this point scratch is a lui(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeALui,
        scratch, Operand(LUI));
    lw(scratch, MemOperand(li_location));
  }
  srl(t9, new_value, kImm16Bits);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location));

  lw(scratch, MemOperand(li_location, kInstrSize));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
        scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  Ins(scratch, new_value, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize));

  // Update the I-cache so the new lui and ori can be executed.
  FlushICache(li_location, 2);
}

void MacroAssembler::GetRelocatedValue(Register li_location,
                                       Register value,
                                       Register scratch) {
  lw(value, MemOperand(li_location));
  if (emit_debug_code()) {
    And(value, value, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeALui,
        value, Operand(LUI));
    lw(value, MemOperand(li_location));
  }

  // value now holds a lui instruction. Extract the immediate.
  sll(value, value, kImm16Bits);

  lw(scratch, MemOperand(li_location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeAnOri,
        scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  // "scratch" now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);

  // Merge the results.
  or_(value, value, scratch);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    li(scratch, Operand(map));
    lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
    And(scratch, scratch, Operand(Map::Deprecated::kMask));
    Branch(if_deprecated, ne, scratch, Operand(zero_reg));
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
  jmp(&other_color);

  bind(&word_boundary);
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  And(t9, t9, Operand(1));
  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
  bind(&other_color);
}
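
// Mark bits come in pairs, one pair per pointer-aligned address: white is
// 00, black is 10 and grey is 11 (see the assertions in EnsureNotWhite
// below). The code above tests the first bit through mask_scratch, shifts
// the mask left by adding it to itself, then tests the second bit; the
// word_boundary path handles a pair that straddles two bitmap cells.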


// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  ASSERT(!AreAliased(value, scratch, t8, no_reg));
  Label is_data_object;
  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  Branch(&is_data_object, eq, t8, Operand(scratch));
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(not_data_object, ne, t8, Operand(zero_reg));
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  sll(t8, t8, kPointerSizeLog2);
  Addu(bitmap_reg, bitmap_reg, t8);
  li(t8, Operand(1));
  sllv(mask_reg, t8, mask_reg);
}
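
// The address is decomposed into bitmap coordinates within its page: bits
// [kPointerSizeLog2, kPointerSizeLog2 + Bitmap::kBitsPerCellLog2) select the
// bit inside a cell (mask_reg), the bits above them up to kPageSizeBits
// select the cell (the t8 index, scaled to a byte offset), and masking with
// ~kPageAlignmentMask recovers the page start the bitmap lives on.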


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // sll may overflow, making the check conservative.
    sll(t8, mask_scratch, 1);
    And(t8, load_scratch, t8);
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing
                                   // type.
  Label is_data_object;

  // Check for heap-number.
  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(map));
    li(length, HeapNumber::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  And(t8, instance_type, Operand(kExternalStringTag));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    li(length, ExternalString::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Sequential string, either ASCII or UC16.
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  lw(t9, FieldMemOperand(value, String::kLengthOffset));
  And(t8, instance_type, Operand(kStringEncodingMask));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    srl(t9, t9, 1);
    bind(&skip);
  }
  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  And(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Addu(t8, t8, Operand(length));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}
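
// Example of the string-length arithmetic above: a sequential ASCII string
// of five characters carries the smi length 10, which srl untags to 5; the
// rounding (5 + SeqString::kHeaderSize + kObjectAlignmentMask) &
// ~kObjectAlignmentMask then yields the aligned allocation size in bytes
// that is added to the page's live-bytes counter.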


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = t2;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  jmp(&start);

  bind(&next);
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  ASSERT(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero, NaN or Inf, return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found,
    Condition cond,
    Label* allocation_memento_present) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  Addu(scratch_reg, receiver_reg,
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
  li(at, Operand(new_space_allocation_top));
  lw(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  if (allocation_memento_present) {
    Branch(allocation_memento_present, cond, scratch_reg,
           Operand(isolate()->factory()->allocation_memento_map()));
  }
}


Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  ASSERT(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // current (scratch0) starts at the object itself.
  Move(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CPU::FlushICache(address_, size_);
  }

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  ASSERT(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported and the simple
  // branch instructions (with opcode being the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending this
  // would be tricky.
  ASSERT(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  ASSERT(!dividend.is(result));
  ASSERT(!dividend.is(at));
  ASSERT(!result.is(at));
  MultiplierAndShift ms(divisor);
  li(at, Operand(ms.multiplier()));
  Mult(dividend, Operand(at));
  mfhi(result);
  if (divisor > 0 && ms.multiplier() < 0) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && ms.multiplier() > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (ms.shift() > 0) sra(result, result, ms.shift());
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}
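
// A sketch of the arithmetic, assuming the classic magic constants from
// Hacker's Delight: for divisor 7 the multiplier would be 0x92492493 with a
// shift of 2. The multiplier is negative while the divisor is positive, so
// the dividend is added back to the high word of the product, the sum is
// arithmetically shifted right by 2, and the dividend's sign bit (srl 31)
// is added so that the quotient is truncated toward zero.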


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS