1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <limits.h>  // For LONG_MIN, LONG_MAX.
6 
7 #include "src/v8.h"
8 
9 #if V8_TARGET_ARCH_MIPS
10 
11 #include "src/base/bits.h"
12 #include "src/base/division-by-constant.h"
13 #include "src/bootstrapper.h"
14 #include "src/codegen.h"
15 #include "src/cpu-profiler.h"
16 #include "src/debug.h"
17 #include "src/isolate-inl.h"
18 #include "src/runtime.h"
19 
20 namespace v8 {
21 namespace internal {
22 
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
24     : Assembler(arg_isolate, buffer, size),
25       generating_stub_(false),
26       has_frame_(false) {
27   if (isolate() != NULL) {
28     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
29                                   isolate());
30   }
31 }
32 
33 
34 void MacroAssembler::Load(Register dst,
35                           const MemOperand& src,
36                           Representation r) {
37   DCHECK(!r.IsDouble());
38   if (r.IsInteger8()) {
39     lb(dst, src);
40   } else if (r.IsUInteger8()) {
41     lbu(dst, src);
42   } else if (r.IsInteger16()) {
43     lh(dst, src);
44   } else if (r.IsUInteger16()) {
45     lhu(dst, src);
46   } else {
47     lw(dst, src);
48   }
49 }
50 
51 
52 void MacroAssembler::Store(Register src,
53                            const MemOperand& dst,
54                            Representation r) {
55   DCHECK(!r.IsDouble());
56   if (r.IsInteger8() || r.IsUInteger8()) {
57     sb(src, dst);
58   } else if (r.IsInteger16() || r.IsUInteger16()) {
59     sh(src, dst);
60   } else {
61     if (r.IsHeapObject()) {
62       AssertNotSmi(src);
63     } else if (r.IsSmi()) {
64       AssertSmi(src);
65     }
66     sw(src, dst);
67   }
68 }
69 
70 
71 void MacroAssembler::LoadRoot(Register destination,
72                               Heap::RootListIndex index) {
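  // The root list is addressed relative to s6 (the root register), so a root
  // is fetched with a single s6-relative word load.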
73   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
74 }
75 
76 
77 void MacroAssembler::LoadRoot(Register destination,
78                               Heap::RootListIndex index,
79                               Condition cond,
80                               Register src1, const Operand& src2) {
81   Branch(2, NegateCondition(cond), src1, src2);
82   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
83 }
84 
85 
86 void MacroAssembler::StoreRoot(Register source,
87                                Heap::RootListIndex index) {
88   sw(source, MemOperand(s6, index << kPointerSizeLog2));
89 }
90 
91 
92 void MacroAssembler::StoreRoot(Register source,
93                                Heap::RootListIndex index,
94                                Condition cond,
95                                Register src1, const Operand& src2) {
96   Branch(2, NegateCondition(cond), src1, src2);
97   sw(source, MemOperand(s6, index << kPointerSizeLog2));
98 }
99 
100 
101 // Push and pop all registers that can hold pointers.
102 void MacroAssembler::PushSafepointRegisters() {
103   // Safepoints expect a block of kNumSafepointRegisters values on the
104   // stack, so adjust the stack for unsaved registers.
105   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
106   DCHECK(num_unsaved >= 0);
107   if (num_unsaved > 0) {
108     Subu(sp, sp, Operand(num_unsaved * kPointerSize));
109   }
110   MultiPush(kSafepointSavedRegisters);
111 }
112 
113 
114 void MacroAssembler::PopSafepointRegisters() {
115   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
116   MultiPop(kSafepointSavedRegisters);
117   if (num_unsaved > 0) {
118     Addu(sp, sp, Operand(num_unsaved * kPointerSize));
119   }
120 }
121 
122 
123 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
124   sw(src, SafepointRegisterSlot(dst));
125 }
126 
127 
128 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
129   lw(dst, SafepointRegisterSlot(src));
130 }
131 
132 
133 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
134   // The registers are pushed starting with the highest encoding,
135   // which means that lowest encodings are closest to the stack pointer.
136   return kSafepointRegisterStackIndexMap[reg_code];
137 }
138 
139 
140 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
141   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
142 }
143 
144 
145 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
146   UNIMPLEMENTED_MIPS();
147   // General purpose registers are pushed last on the stack.
148   int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
149   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
150   return MemOperand(sp, doubles_size + register_offset);
151 }
152 
153 
154 void MacroAssembler::InNewSpace(Register object,
155                                 Register scratch,
156                                 Condition cc,
157                                 Label* branch) {
158   DCHECK(cc == eq || cc == ne);
159   And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
160   Branch(branch, cc, scratch,
161          Operand(ExternalReference::new_space_start(isolate())));
162 }
163 
164 
165 void MacroAssembler::RecordWriteField(
166     Register object,
167     int offset,
168     Register value,
169     Register dst,
170     RAStatus ra_status,
171     SaveFPRegsMode save_fp,
172     RememberedSetAction remembered_set_action,
173     SmiCheck smi_check,
174     PointersToHereCheck pointers_to_here_check_for_value) {
175   DCHECK(!AreAliased(value, dst, t8, object));
176   // First, check if a write barrier is even needed. The tests below
177   // catch stores of Smis.
178   Label done;
179 
180   // Skip barrier if writing a smi.
181   if (smi_check == INLINE_SMI_CHECK) {
182     JumpIfSmi(value, &done);
183   }
184 
185   // Although the object register is tagged, the offset is relative to the start
186   // of the object, so the offset must be a multiple of kPointerSize.
187   DCHECK(IsAligned(offset, kPointerSize));
188 
189   Addu(dst, object, Operand(offset - kHeapObjectTag));
190   if (emit_debug_code()) {
191     Label ok;
192     And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
193     Branch(&ok, eq, t8, Operand(zero_reg));
194     stop("Unaligned cell in write barrier");
195     bind(&ok);
196   }
197 
198   RecordWrite(object,
199               dst,
200               value,
201               ra_status,
202               save_fp,
203               remembered_set_action,
204               OMIT_SMI_CHECK,
205               pointers_to_here_check_for_value);
206 
207   bind(&done);
208 
209   // Clobber clobbered input registers when running with the debug-code flag
210   // turned on to provoke errors.
211   if (emit_debug_code()) {
212     li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
213     li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
214   }
215 }
216 
217 
218 // Will clobber 4 registers: object, map, dst, ip.  The
219 // register 'object' contains a heap object pointer.
220 void MacroAssembler::RecordWriteForMap(Register object,
221                                        Register map,
222                                        Register dst,
223                                        RAStatus ra_status,
224                                        SaveFPRegsMode fp_mode) {
225   if (emit_debug_code()) {
226     DCHECK(!dst.is(at));
227     lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
228     Check(eq,
229           kWrongAddressOrValuePassedToRecordWrite,
230           dst,
231           Operand(isolate()->factory()->meta_map()));
232   }
233 
234   if (!FLAG_incremental_marking) {
235     return;
236   }
237 
238   if (emit_debug_code()) {
239     lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
240     Check(eq,
241           kWrongAddressOrValuePassedToRecordWrite,
242           map,
243           Operand(at));
244   }
245 
246   Label done;
247 
248   // A single check of the map's pages interesting flag suffices, since it is
249   // only set during incremental collection, and then it's also guaranteed that
250   // the from object's page's interesting flag is also set.  This optimization
251   // relies on the fact that maps can never be in new space.
252   CheckPageFlag(map,
253                 map,  // Used as scratch.
254                 MemoryChunk::kPointersToHereAreInterestingMask,
255                 eq,
256                 &done);
257 
258   Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
259   if (emit_debug_code()) {
260     Label ok;
261     And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
262     Branch(&ok, eq, at, Operand(zero_reg));
263     stop("Unaligned cell in write barrier");
264     bind(&ok);
265   }
266 
267   // Record the actual write.
268   if (ra_status == kRAHasNotBeenSaved) {
269     push(ra);
270   }
271   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
272                        fp_mode);
273   CallStub(&stub);
274   if (ra_status == kRAHasNotBeenSaved) {
275     pop(ra);
276   }
277 
278   bind(&done);
279 
280   // Count number of write barriers in generated code.
281   isolate()->counters()->write_barriers_static()->Increment();
282   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
283 
284   // Clobber clobbered registers when running with the debug-code flag
285   // turned on to provoke errors.
286   if (emit_debug_code()) {
287     li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
288     li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
289   }
290 }
291 
292 
293 // Will clobber 4 registers: object, address, scratch, ip.  The
294 // register 'object' contains a heap object pointer.  The heap object
295 // tag is shifted away.
296 void MacroAssembler::RecordWrite(
297     Register object,
298     Register address,
299     Register value,
300     RAStatus ra_status,
301     SaveFPRegsMode fp_mode,
302     RememberedSetAction remembered_set_action,
303     SmiCheck smi_check,
304     PointersToHereCheck pointers_to_here_check_for_value) {
305   DCHECK(!AreAliased(object, address, value, t8));
306   DCHECK(!AreAliased(object, address, value, t9));
307 
308   if (emit_debug_code()) {
309     lw(at, MemOperand(address));
310     Assert(
311         eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
312   }
313 
314   if (remembered_set_action == OMIT_REMEMBERED_SET &&
315       !FLAG_incremental_marking) {
316     return;
317   }
318 
319   // First, check if a write barrier is even needed. The tests below
320   // catch stores of smis and stores into the young generation.
321   Label done;
322 
323   if (smi_check == INLINE_SMI_CHECK) {
324     DCHECK_EQ(0, kSmiTag);
325     JumpIfSmi(value, &done);
326   }
327 
328   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
329     CheckPageFlag(value,
330                   value,  // Used as scratch.
331                   MemoryChunk::kPointersToHereAreInterestingMask,
332                   eq,
333                   &done);
334   }
335   CheckPageFlag(object,
336                 value,  // Used as scratch.
337                 MemoryChunk::kPointersFromHereAreInterestingMask,
338                 eq,
339                 &done);
340 
341   // Record the actual write.
342   if (ra_status == kRAHasNotBeenSaved) {
343     push(ra);
344   }
345   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
346                        fp_mode);
347   CallStub(&stub);
348   if (ra_status == kRAHasNotBeenSaved) {
349     pop(ra);
350   }
351 
352   bind(&done);
353 
354   // Count number of write barriers in generated code.
355   isolate()->counters()->write_barriers_static()->Increment();
356   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
357                    value);
358 
359   // Clobber clobbered registers when running with the debug-code flag
360   // turned on to provoke errors.
361   if (emit_debug_code()) {
362     li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
363     li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
364   }
365 }
366 
367 
368 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
369                                          Register address,
370                                          Register scratch,
371                                          SaveFPRegsMode fp_mode,
372                                          RememberedSetFinalAction and_then) {
373   Label done;
374   if (emit_debug_code()) {
375     Label ok;
376     JumpIfNotInNewSpace(object, scratch, &ok);
377     stop("Remembered set pointer is in new space");
378     bind(&ok);
379   }
380   // Load store buffer top.
381   ExternalReference store_buffer =
382       ExternalReference::store_buffer_top(isolate());
383   li(t8, Operand(store_buffer));
384   lw(scratch, MemOperand(t8));
385   // Store pointer to buffer and increment buffer top.
386   sw(address, MemOperand(scratch));
387   Addu(scratch, scratch, kPointerSize);
388   // Write back new top of buffer.
389   sw(scratch, MemOperand(t8));
390   // Call stub on end of buffer.
391   // Check for end of buffer.
392   And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
393   if (and_then == kFallThroughAtEnd) {
394     Branch(&done, eq, t8, Operand(zero_reg));
395   } else {
396     DCHECK(and_then == kReturnAtEnd);
397     Ret(eq, t8, Operand(zero_reg));
398   }
399   push(ra);
400   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
401   CallStub(&store_buffer_overflow);
402   pop(ra);
403   bind(&done);
404   if (and_then == kReturnAtEnd) {
405     Ret();
406   }
407 }
408 
409 
410 // -----------------------------------------------------------------------------
411 // Allocation support.
412 
413 
414 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
415                                             Register scratch,
416                                             Label* miss) {
417   Label same_contexts;
418 
419   DCHECK(!holder_reg.is(scratch));
420   DCHECK(!holder_reg.is(at));
421   DCHECK(!scratch.is(at));
422 
423   // Load current lexical context from the stack frame.
424   lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
425   // In debug mode, make sure the lexical context is set.
426 #ifdef DEBUG
427   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
428       scratch, Operand(zero_reg));
429 #endif
430 
431   // Load the native context of the current context.
432   int offset =
433       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
434   lw(scratch, FieldMemOperand(scratch, offset));
435   lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
436 
437   // Check the context is a native context.
438   if (emit_debug_code()) {
439     push(holder_reg);  // Temporarily save holder on the stack.
440     // Read the first word and compare to the native_context_map.
441     lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
442     LoadRoot(at, Heap::kNativeContextMapRootIndex);
443     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
444           holder_reg, Operand(at));
445     pop(holder_reg);  // Restore holder.
446   }
447 
448   // Check if both contexts are the same.
449   lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
450   Branch(&same_contexts, eq, scratch, Operand(at));
451 
452   // Check the context is a native context.
453   if (emit_debug_code()) {
454     push(holder_reg);  // Temporarily save holder on the stack.
455     mov(holder_reg, at);  // Move at to its holding place.
456     LoadRoot(at, Heap::kNullValueRootIndex);
457     Check(ne, kJSGlobalProxyContextShouldNotBeNull,
458           holder_reg, Operand(at));
459 
460     lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
461     LoadRoot(at, Heap::kNativeContextMapRootIndex);
462     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
463           holder_reg, Operand(at));
464     // Restoring at is not needed; at is reloaded below.
465     pop(holder_reg);  // Restore holder.
466     // Restore at to holder's context.
467     lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
468   }
469 
470   // Check that the security token in the calling global object is
471   // compatible with the security token in the receiving global
472   // object.
473   int token_offset = Context::kHeaderSize +
474                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
475 
476   lw(scratch, FieldMemOperand(scratch, token_offset));
477   lw(at, FieldMemOperand(at, token_offset));
478   Branch(miss, ne, scratch, Operand(at));
479 
480   bind(&same_contexts);
481 }
482 
483 
484 // Compute the hash code from the untagged key.  This must be kept in sync with
485 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
486 // code-stub-hydrogen.cc
487 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
488   // First of all we assign the hash seed to scratch.
489   LoadRoot(scratch, Heap::kHashSeedRootIndex);
490   SmiUntag(scratch);
491 
492   // Xor original key with a seed.
493   xor_(reg0, reg0, scratch);
494 
495   // Compute the hash code from the untagged key.  This must be kept in sync
496   // with ComputeIntegerHash in utils.h.
497   //
498   // hash = ~hash + (hash << 15);
499   nor(scratch, reg0, zero_reg);
500   sll(at, reg0, 15);
501   addu(reg0, scratch, at);
502 
503   // hash = hash ^ (hash >> 12);
504   srl(at, reg0, 12);
505   xor_(reg0, reg0, at);
506 
507   // hash = hash + (hash << 2);
508   sll(at, reg0, 2);
509   addu(reg0, reg0, at);
510 
511   // hash = hash ^ (hash >> 4);
512   srl(at, reg0, 4);
513   xor_(reg0, reg0, at);
514 
515   // hash = hash * 2057;
516   sll(scratch, reg0, 11);
517   sll(at, reg0, 3);
518   addu(reg0, reg0, at);
519   addu(reg0, reg0, scratch);
520 
521   // hash = hash ^ (hash >> 16);
522   srl(at, reg0, 16);
523   xor_(reg0, reg0, at);
524 }
525 
526 
527 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
528                                               Register elements,
529                                               Register key,
530                                               Register result,
531                                               Register reg0,
532                                               Register reg1,
533                                               Register reg2) {
534   // Register use:
535   //
536   // elements - holds the slow-case elements of the receiver on entry.
537   //            Unchanged unless 'result' is the same register.
538   //
539   // key      - holds the smi key on entry.
540   //            Unchanged unless 'result' is the same register.
541   //
542   //
543   // result   - holds the result on exit if the load succeeded.
544   //            Allowed to be the same as 'key' or 'result'.
545   //            Unchanged on bailout so 'key' or 'result' can be used
546   //            in further computation.
547   //
548   // Scratch registers:
549   //
550   // reg0 - holds the untagged key on entry and holds the hash once computed.
551   //
552   // reg1 - Used to hold the capacity mask of the dictionary.
553   //
554   // reg2 - Used for the index into the dictionary.
555   // at   - Temporary (avoid MacroAssembler instructions also using 'at').
556   Label done;
557 
558   GetNumberHash(reg0, reg1);
559 
560   // Compute the capacity mask.
561   lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
562   sra(reg1, reg1, kSmiTagSize);
563   Subu(reg1, reg1, Operand(1));
564 
565   // Generate an unrolled loop that performs a few probes before giving up.
566   for (int i = 0; i < kNumberDictionaryProbes; i++) {
567     // Use reg2 for index calculations and keep the hash intact in reg0.
568     mov(reg2, reg0);
569     // Compute the masked index: (hash + i + i * i) & mask.
570     if (i > 0) {
571       Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
572     }
573     and_(reg2, reg2, reg1);
574 
575     // Scale the index by multiplying by the element size.
576     DCHECK(SeededNumberDictionary::kEntrySize == 3);
577     sll(at, reg2, 1);  // 2x.
578     addu(reg2, reg2, at);  // reg2 = reg2 * 3.
579 
580     // Check if the key is identical to the name.
581     sll(at, reg2, kPointerSizeLog2);
582     addu(reg2, elements, at);
583 
584     lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
585     if (i != kNumberDictionaryProbes - 1) {
586       Branch(&done, eq, key, Operand(at));
587     } else {
588       Branch(miss, ne, key, Operand(at));
589     }
590   }
591 
592   bind(&done);
593   // Check that the value is a normal property.
594   // reg2: elements + (index * kPointerSize).
595   const int kDetailsOffset =
596       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
597   lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
598   And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
599   Branch(miss, ne, at, Operand(zero_reg));
600 
601   // Get the value at the masked, scaled index and return.
602   const int kValueOffset =
603       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
604   lw(result, FieldMemOperand(reg2, kValueOffset));
605 }
606 
607 
608 // ---------------------------------------------------------------------------
609 // Instruction macros.
610 
611 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
612   if (rt.is_reg()) {
613     addu(rd, rs, rt.rm());
614   } else {
615     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
616       addiu(rd, rs, rt.imm32_);
617     } else {
618       // li handles the relocation.
619       DCHECK(!rs.is(at));
620       li(at, rt);
621       addu(rd, rs, at);
622     }
623   }
624 }
625 
626 
627 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
628   if (rt.is_reg()) {
629     subu(rd, rs, rt.rm());
630   } else {
631     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
632       addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
633     } else {
634       // li handles the relocation.
635       DCHECK(!rs.is(at));
636       li(at, rt);
637       subu(rd, rs, at);
638     }
639   }
640 }
641 
642 
643 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
644   if (rt.is_reg()) {
645     if (IsMipsArchVariant(kLoongson)) {
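      // Loongson cores lack the three-operand mul instruction, so use
      // mult/mflo instead.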
646       mult(rs, rt.rm());
647       mflo(rd);
648     } else {
649       mul(rd, rs, rt.rm());
650     }
651   } else {
652     // li handles the relocation.
653     DCHECK(!rs.is(at));
654     li(at, rt);
655     if (IsMipsArchVariant(kLoongson)) {
656       mult(rs, at);
657       mflo(rd);
658     } else {
659       mul(rd, rs, at);
660     }
661   }
662 }
663 
664 
665 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
666     Register rs, const Operand& rt) {
667   if (rt.is_reg()) {
668     if (!IsMipsArchVariant(kMips32r6)) {
669       mult(rs, rt.rm());
670       mflo(rd_lo);
671       mfhi(rd_hi);
672     } else {
673       if (rd_lo.is(rs)) {
674         DCHECK(!rd_hi.is(rs));
675         DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
676         muh(rd_hi, rs, rt.rm());
677         mul(rd_lo, rs, rt.rm());
678       } else {
679         DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
680         mul(rd_lo, rs, rt.rm());
681         muh(rd_hi, rs, rt.rm());
682       }
683     }
684   } else {
685     // li handles the relocation.
686     DCHECK(!rs.is(at));
687     li(at, rt);
688     if (!IsMipsArchVariant(kMips32r6)) {
689       mult(rs, at);
690       mflo(rd_lo);
691       mfhi(rd_hi);
692     } else {
693       if (rd_lo.is(rs)) {
694         DCHECK(!rd_hi.is(rs));
695         DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
696         muh(rd_hi, rs, at);
697         mul(rd_lo, rs, at);
698       } else {
699         DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
700         mul(rd_lo, rs, at);
701         muh(rd_hi, rs, at);
702       }
703     }
704   }
705 }
706 
707 
708 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
709   if (rt.is_reg()) {
710     if (!IsMipsArchVariant(kMips32r6)) {
711       mult(rs, rt.rm());
712       mfhi(rd);
713     } else {
714       muh(rd, rs, rt.rm());
715     }
716   } else {
717     // li handles the relocation.
718     DCHECK(!rs.is(at));
719     li(at, rt);
720     if (!IsMipsArchVariant(kMips32r6)) {
721       mult(rs, at);
722       mfhi(rd);
723     } else {
724       muh(rd, rs, at);
725     }
726   }
727 }
728 
729 
730 void MacroAssembler::Mult(Register rs, const Operand& rt) {
731   if (rt.is_reg()) {
732     mult(rs, rt.rm());
733   } else {
734     // li handles the relocation.
735     DCHECK(!rs.is(at));
736     li(at, rt);
737     mult(rs, at);
738   }
739 }
740 
741 
742 void MacroAssembler::Multu(Register rs, const Operand& rt) {
743   if (rt.is_reg()) {
744     multu(rs, rt.rm());
745   } else {
746     // li handles the relocation.
747     DCHECK(!rs.is(at));
748     li(at, rt);
749     multu(rs, at);
750   }
751 }
752 
753 
754 void MacroAssembler::Div(Register rs, const Operand& rt) {
755   if (rt.is_reg()) {
756     div(rs, rt.rm());
757   } else {
758     // li handles the relocation.
759     DCHECK(!rs.is(at));
760     li(at, rt);
761     div(rs, at);
762   }
763 }
764 
765 
766 void MacroAssembler::Div(Register rem, Register res,
767     Register rs, const Operand& rt) {
768   if (rt.is_reg()) {
769     if (!IsMipsArchVariant(kMips32r6)) {
770       div(rs, rt.rm());
771       mflo(res);
772       mfhi(rem);
773     } else {
774       div(res, rs, rt.rm());
775       mod(rem, rs, rt.rm());
776     }
777   } else {
778     // li handles the relocation.
779     DCHECK(!rs.is(at));
780     li(at, rt);
781     if (!IsMipsArchVariant(kMips32r6)) {
782       div(rs, at);
783       mflo(res);
784       mfhi(rem);
785     } else {
786       div(res, rs, at);
787       mod(rem, rs, at);
788     }
789   }
790 }
791 
792 
793 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
794   if (rt.is_reg()) {
795     if (!IsMipsArchVariant(kMips32r6)) {
796       div(rs, rt.rm());
797       mfhi(rd);
798     } else {
799       mod(rd, rs, rt.rm());
800     }
801   } else {
802     // li handles the relocation.
803     DCHECK(!rs.is(at));
804     li(at, rt);
805     if (!IsMipsArchVariant(kMips32r6)) {
806       div(rs, at);
807       mfhi(rd);
808     } else {
809       mod(rd, rs, at);
810     }
811   }
812 }
813 
814 
815 void MacroAssembler::Divu(Register rs, const Operand& rt) {
816   if (rt.is_reg()) {
817     divu(rs, rt.rm());
818   } else {
819     // li handles the relocation.
820     DCHECK(!rs.is(at));
821     li(at, rt);
822     divu(rs, at);
823   }
824 }
825 
826 
827 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
828   if (rt.is_reg()) {
829     and_(rd, rs, rt.rm());
830   } else {
831     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
832       andi(rd, rs, rt.imm32_);
833     } else {
834       // li handles the relocation.
835       DCHECK(!rs.is(at));
836       li(at, rt);
837       and_(rd, rs, at);
838     }
839   }
840 }
841 
842 
843 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
844   if (rt.is_reg()) {
845     or_(rd, rs, rt.rm());
846   } else {
847     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
848       ori(rd, rs, rt.imm32_);
849     } else {
850       // li handles the relocation.
851       DCHECK(!rs.is(at));
852       li(at, rt);
853       or_(rd, rs, at);
854     }
855   }
856 }
857 
858 
859 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
860   if (rt.is_reg()) {
861     xor_(rd, rs, rt.rm());
862   } else {
863     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
864       xori(rd, rs, rt.imm32_);
865     } else {
866       // li handles the relocation.
867       DCHECK(!rs.is(at));
868       li(at, rt);
869       xor_(rd, rs, at);
870     }
871   }
872 }
873 
874 
875 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
876   if (rt.is_reg()) {
877     nor(rd, rs, rt.rm());
878   } else {
879     // li handles the relocation.
880     DCHECK(!rs.is(at));
881     li(at, rt);
882     nor(rd, rs, at);
883   }
884 }
885 
886 
887 void MacroAssembler::Neg(Register rs, const Operand& rt) {
888   DCHECK(rt.is_reg());
889   DCHECK(!at.is(rs));
890   DCHECK(!at.is(rt.rm()));
891   li(at, -1);
892   xor_(rs, rt.rm(), at);
893 }
894 
895 
896 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
897   if (rt.is_reg()) {
898     slt(rd, rs, rt.rm());
899   } else {
900     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
901       slti(rd, rs, rt.imm32_);
902     } else {
903       // li handles the relocation.
904       DCHECK(!rs.is(at));
905       li(at, rt);
906       slt(rd, rs, at);
907     }
908   }
909 }
910 
911 
912 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
913   if (rt.is_reg()) {
914     sltu(rd, rs, rt.rm());
915   } else {
916     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
917       sltiu(rd, rs, rt.imm32_);
918     } else {
919       // li handles the relocation.
920       DCHECK(!rs.is(at));
921       li(at, rt);
922       sltu(rd, rs, at);
923     }
924   }
925 }
926 
927 
928 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
929   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
930     if (rt.is_reg()) {
931       rotrv(rd, rs, rt.rm());
932     } else {
933       rotr(rd, rs, rt.imm32_);
934     }
935   } else {
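    // Pre-R2 cores have no rotate instructions, so synthesize the rotation
    // from a pair of shifts combined with an or.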
936     if (rt.is_reg()) {
937       subu(at, zero_reg, rt.rm());
938       sllv(at, rs, at);
939       srlv(rd, rs, rt.rm());
940       or_(rd, rd, at);
941     } else {
942       if (rt.imm32_ == 0) {
943         srl(rd, rs, 0);
944       } else {
945         srl(at, rs, rt.imm32_);
946         sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
947         or_(rd, rd, at);
948       }
949     }
950   }
951 }
952 
953 
954 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
955   if (IsMipsArchVariant(kLoongson)) {
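    // Loongson does not implement pref; a plain load into zero_reg touches
    // the cache line and discards the loaded value.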
956     lw(zero_reg, rs);
957   } else {
958     pref(hint, rs);
959   }
960 }
961 
962 
963 // ------------Pseudo-instructions-------------
964 
965 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
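  // Unaligned word load, composed from an lwr/lwl pair over rd.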
966   lwr(rd, rs);
967   lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
968 }
969 
970 
971 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
972   swr(rd, rs);
973   swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
974 }
975 
976 
977 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
978   AllowDeferredHandleDereference smi_check;
979   if (value->IsSmi()) {
980     li(dst, Operand(value), mode);
981   } else {
982     DCHECK(value->IsHeapObject());
983     if (isolate()->heap()->InNewSpace(*value)) {
984       Handle<Cell> cell = isolate()->factory()->NewCell(value);
985       li(dst, Operand(cell));
986       lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
987     } else {
988       li(dst, Operand(value));
989     }
990   }
991 }
992 
993 
994 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
995   DCHECK(!j.is_reg());
996   BlockTrampolinePoolScope block_trampoline_pool(this);
997   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
998     // Normal load of an immediate value which does not need Relocation Info.
999     if (is_int16(j.imm32_)) {
1000       addiu(rd, zero_reg, j.imm32_);
1001     } else if (!(j.imm32_ & kHiMask)) {
1002       ori(rd, zero_reg, j.imm32_);
1003     } else if (!(j.imm32_ & kImm16Mask)) {
1004       lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1005     } else {
1006       lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1007       ori(rd, rd, (j.imm32_ & kImm16Mask));
1008     }
1009   } else {
1010     if (MustUseReg(j.rmode_)) {
1011       RecordRelocInfo(j.rmode_, j.imm32_);
1012     }
1013     // We always emit the same number of instructions, since this code may
1014     // later be patched to load another value that needs two instructions.
1015     lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1016     ori(rd, rd, (j.imm32_ & kImm16Mask));
1017   }
1018 }
1019 
1020 
1021 void MacroAssembler::MultiPush(RegList regs) {
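  // Registers are stored in descending code order, so the register with the
  // highest code ends up at the highest stack address.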
1022   int16_t num_to_push = NumberOfBitsSet(regs);
1023   int16_t stack_offset = num_to_push * kPointerSize;
1024 
1025   Subu(sp, sp, Operand(stack_offset));
1026   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1027     if ((regs & (1 << i)) != 0) {
1028       stack_offset -= kPointerSize;
1029       sw(ToRegister(i), MemOperand(sp, stack_offset));
1030     }
1031   }
1032 }
1033 
1034 
1035 void MacroAssembler::MultiPushReversed(RegList regs) {
1036   int16_t num_to_push = NumberOfBitsSet(regs);
1037   int16_t stack_offset = num_to_push * kPointerSize;
1038 
1039   Subu(sp, sp, Operand(stack_offset));
1040   for (int16_t i = 0; i < kNumRegisters; i++) {
1041     if ((regs & (1 << i)) != 0) {
1042       stack_offset -= kPointerSize;
1043       sw(ToRegister(i), MemOperand(sp, stack_offset));
1044     }
1045   }
1046 }
1047 
1048 
1049 void MacroAssembler::MultiPop(RegList regs) {
1050   int16_t stack_offset = 0;
1051 
1052   for (int16_t i = 0; i < kNumRegisters; i++) {
1053     if ((regs & (1 << i)) != 0) {
1054       lw(ToRegister(i), MemOperand(sp, stack_offset));
1055       stack_offset += kPointerSize;
1056     }
1057   }
1058   addiu(sp, sp, stack_offset);
1059 }
1060 
1061 
1062 void MacroAssembler::MultiPopReversed(RegList regs) {
1063   int16_t stack_offset = 0;
1064 
1065   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1066     if ((regs & (1 << i)) != 0) {
1067       lw(ToRegister(i), MemOperand(sp, stack_offset));
1068       stack_offset += kPointerSize;
1069     }
1070   }
1071   addiu(sp, sp, stack_offset);
1072 }
1073 
1074 
1075 void MacroAssembler::MultiPushFPU(RegList regs) {
1076   int16_t num_to_push = NumberOfBitsSet(regs);
1077   int16_t stack_offset = num_to_push * kDoubleSize;
1078 
1079   Subu(sp, sp, Operand(stack_offset));
1080   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1081     if ((regs & (1 << i)) != 0) {
1082       stack_offset -= kDoubleSize;
1083       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1084     }
1085   }
1086 }
1087 
1088 
1089 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1090   int16_t num_to_push = NumberOfBitsSet(regs);
1091   int16_t stack_offset = num_to_push * kDoubleSize;
1092 
1093   Subu(sp, sp, Operand(stack_offset));
1094   for (int16_t i = 0; i < kNumRegisters; i++) {
1095     if ((regs & (1 << i)) != 0) {
1096       stack_offset -= kDoubleSize;
1097       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1098     }
1099   }
1100 }
1101 
1102 
1103 void MacroAssembler::MultiPopFPU(RegList regs) {
1104   int16_t stack_offset = 0;
1105 
1106   for (int16_t i = 0; i < kNumRegisters; i++) {
1107     if ((regs & (1 << i)) != 0) {
1108       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1109       stack_offset += kDoubleSize;
1110     }
1111   }
1112   addiu(sp, sp, stack_offset);
1113 }
1114 
1115 
1116 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1117   int16_t stack_offset = 0;
1118 
1119   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1120     if ((regs & (1 << i)) != 0) {
1121       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1122       stack_offset += kDoubleSize;
1123     }
1124   }
1125   addiu(sp, sp, stack_offset);
1126 }
1127 
1128 
1129 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1130   RegList saved_regs = kJSCallerSaved | ra.bit();
1131   MultiPush(saved_regs);
1132   AllowExternalCallThatCantCauseGC scope(this);
1133 
1134   // Save to a0 in case address == t0.
1135   Move(a0, address);
1136   PrepareCallCFunction(2, t0);
1137 
1138   li(a1, instructions * kInstrSize);
1139   CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1140   MultiPop(saved_regs);
1141 }
1142 
1143 
1144 void MacroAssembler::Ext(Register rt,
1145                          Register rs,
1146                          uint16_t pos,
1147                          uint16_t size) {
1148   DCHECK(pos < 32);
1149   DCHECK(pos + size < 33);
1150 
1151   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1152     ext_(rt, rs, pos, size);
1153   } else {
1154     // Move rs to rt and shift it left then right to get the
1155     // desired bitfield on the right side and zeroes on the left.
1156     int shift_left = 32 - (pos + size);
1157     sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
1158 
1159     int shift_right = 32 - size;
1160     if (shift_right > 0) {
1161       srl(rt, rt, shift_right);
1162     }
1163   }
1164 }
1165 
1166 
1167 void MacroAssembler::Ins(Register rt,
1168                          Register rs,
1169                          uint16_t pos,
1170                          uint16_t size) {
1171   DCHECK(pos < 32);
1172   DCHECK(pos + size <= 32);
1173   DCHECK(size != 0);
1174 
1175   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1176     ins_(rt, rs, pos, size);
1177   } else {
1178     DCHECK(!rt.is(t8) && !rs.is(t8));
1179     Subu(at, zero_reg, Operand(1));
1180     srl(at, at, 32 - size);
1181     and_(t8, rs, at);
1182     sll(t8, t8, pos);
1183     sll(at, at, pos);
1184     nor(at, at, zero_reg);
1185     and_(at, rt, at);
1186     or_(rt, t8, at);
1187   }
1188 }
1189 
1190 
1191 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1192                               FPURegister fs,
1193                               FPURegister scratch) {
1194   // Move the data from fs to t8.
1195   mfc1(t8, fs);
1196   Cvt_d_uw(fd, t8, scratch);
1197 }
1198 
1199 
1200 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1201                               Register rs,
1202                               FPURegister scratch) {
1203   // Convert rs to a FP value in fd (and fd + 1).
1204   // We do this by converting rs minus the MSB to avoid sign conversion,
1205   // then adding 2^31 to the result (if needed).
1206 
1207   DCHECK(!fd.is(scratch));
1208   DCHECK(!rs.is(t9));
1209   DCHECK(!rs.is(at));
1210 
1211   // Save rs's MSB to t9.
1212   Ext(t9, rs, 31, 1);
1213   // Remove rs's MSB.
1214   Ext(at, rs, 0, 31);
1215   // Move the result to fd.
1216   mtc1(at, fd);
1217 
1218   // Convert fd to a real FP value.
1219   cvt_d_w(fd, fd);
1220 
1221   Label conversion_done;
1222 
1223   // If rs's MSB was 0, it's done.
1224   // Otherwise we need to add that to the FP register.
1225   Branch(&conversion_done, eq, t9, Operand(zero_reg));
1226 
1227   // Load 2^31 into scratch as its float representation.
1228   li(at, 0x41E00000);
1229   mtc1(zero_reg, scratch);
1230   Mthc1(at, scratch);
1231   // Add it to fd.
1232   add_d(fd, fd, scratch);
1233 
1234   bind(&conversion_done);
1235 }
1236 
1237 
1238 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1239                                 FPURegister fs,
1240                                 FPURegister scratch) {
1241   Trunc_uw_d(fs, t8, scratch);
1242   mtc1(t8, fd);
1243 }
1244 
1245 
1246 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1247   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1248     Mfhc1(t8, fs);
1249     trunc_w_d(fd, fs);
1250     Mthc1(t8, fs);
1251   } else {
1252     trunc_w_d(fd, fs);
1253   }
1254 }
1255 
1256 
1257 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1258   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1259     Mfhc1(t8, fs);
1260     round_w_d(fd, fs);
1261     Mthc1(t8, fs);
1262   } else {
1263     round_w_d(fd, fs);
1264   }
1265 }
1266 
1267 
1268 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1269   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1270     Mfhc1(t8, fs);
1271     floor_w_d(fd, fs);
1272     Mthc1(t8, fs);
1273   } else {
1274     floor_w_d(fd, fs);
1275   }
1276 }
1277 
1278 
1279 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1280   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1281     Mfhc1(t8, fs);
1282     ceil_w_d(fd, fs);
1283     Mthc1(t8, fs);
1284   } else {
1285     ceil_w_d(fd, fs);
1286   }
1287 }
1288 
1289 
1290 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1291                                 Register rs,
1292                                 FPURegister scratch) {
1293   DCHECK(!fd.is(scratch));
1294   DCHECK(!rs.is(at));
1295 
1296   // Load 2^31 into scratch as its float representation.
1297   li(at, 0x41E00000);
1298   mtc1(zero_reg, scratch);
1299   Mthc1(at, scratch);
1300   // Test if scratch > fd.
1301   // If fd < 2^31 we can convert it normally.
1302   Label simple_convert;
1303   BranchF(&simple_convert, NULL, lt, fd, scratch);
1304 
1305   // First we subtract 2^31 from fd, then trunc it to rs
1306   // and add 2^31 to rs.
1307   sub_d(scratch, fd, scratch);
1308   trunc_w_d(scratch, scratch);
1309   mfc1(rs, scratch);
1310   Or(rs, rs, 1 << 31);
1311 
1312   Label done;
1313   Branch(&done);
1314   // Simple conversion.
1315   bind(&simple_convert);
1316   trunc_w_d(scratch, fd);
1317   mfc1(rs, scratch);
1318 
1319   bind(&done);
1320 }
1321 
1322 
1323 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
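  // Write the high word of a double FPU register: directly via mthc1 in
  // FR=1 mode, or through the odd register of the pair in FR=0 mode.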
1324   if (IsFp64Mode()) {
1325     mthc1(rt, fs);
1326   } else {
1327     mtc1(rt, fs.high());
1328   }
1329 }
1330 
1331 
1332 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1333   if (IsFp64Mode()) {
1334     mfhc1(rt, fs);
1335   } else {
1336     mfc1(rt, fs.high());
1337   }
1338 }
1339 
1340 
1341 void MacroAssembler::BranchF(Label* target,
1342                              Label* nan,
1343                              Condition cc,
1344                              FPURegister cmp1,
1345                              FPURegister cmp2,
1346                              BranchDelaySlot bd) {
1347   BlockTrampolinePoolScope block_trampoline_pool(this);
1348   if (cc == al) {
1349     Branch(bd, target);
1350     return;
1351   }
1352 
1353   DCHECK(nan || target);
1354   // Check for unordered (NaN) cases.
1355   if (nan) {
1356     if (!IsMipsArchVariant(kMips32r6)) {
1357       c(UN, D, cmp1, cmp2);
1358       bc1t(nan);
1359     } else {
1360       // Use kDoubleCompareReg for the comparison result. It has to be
1361       // unavailable to the Lithium register allocator.
1362       DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1363       cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1364       bc1nez(nan, kDoubleCompareReg);
1365     }
1366   }
1367 
1368   if (!IsMipsArchVariant(kMips32r6)) {
1369     if (target) {
1370       // Here NaN cases were either handled by this function or are assumed to
1371       // have been handled by the caller.
1372       switch (cc) {
1373         case lt:
1374           c(OLT, D, cmp1, cmp2);
1375           bc1t(target);
1376           break;
1377         case gt:
1378           c(ULE, D, cmp1, cmp2);
1379           bc1f(target);
1380           break;
1381         case ge:
1382           c(ULT, D, cmp1, cmp2);
1383           bc1f(target);
1384           break;
1385         case le:
1386           c(OLE, D, cmp1, cmp2);
1387           bc1t(target);
1388           break;
1389         case eq:
1390           c(EQ, D, cmp1, cmp2);
1391           bc1t(target);
1392           break;
1393         case ueq:
1394           c(UEQ, D, cmp1, cmp2);
1395           bc1t(target);
1396           break;
1397         case ne:
1398           c(EQ, D, cmp1, cmp2);
1399           bc1f(target);
1400           break;
1401         case nue:
1402           c(UEQ, D, cmp1, cmp2);
1403           bc1f(target);
1404           break;
1405         default:
1406           CHECK(0);
1407       }
1408     }
1409   } else {
1410     if (target) {
1411       // Here NaN cases were either handled by this function or are assumed to
1412       // have been handled by the caller.
1413       // Unsigned conditions are treated as their signed counterpart.
1414       // Use kDoubleCompareReg for the comparison result; it is valid in
1415       // fp64 (FR = 1) mode, which is implied for mips32r6.
1416       DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1417       switch (cc) {
1418         case lt:
1419           cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
1420           bc1nez(target, kDoubleCompareReg);
1421           break;
1422         case gt:
1423           cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
1424           bc1eqz(target, kDoubleCompareReg);
1425           break;
1426         case ge:
1427           cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
1428           bc1eqz(target, kDoubleCompareReg);
1429           break;
1430         case le:
1431           cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
1432           bc1nez(target, kDoubleCompareReg);
1433           break;
1434         case eq:
1435           cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
1436           bc1nez(target, kDoubleCompareReg);
1437           break;
1438         case ueq:
1439           cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
1440           bc1nez(target, kDoubleCompareReg);
1441           break;
1442         case ne:
1443           cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
1444           bc1eqz(target, kDoubleCompareReg);
1445           break;
1446         case nue:
1447           cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
1448           bc1eqz(target, kDoubleCompareReg);
1449           break;
1450         default:
1451           CHECK(0);
1452       }
1453     }
1454   }
1455 
1456   if (bd == PROTECT) {
1457     nop();
1458   }
1459 }
1460 
1461 
1462 void MacroAssembler::Move(FPURegister dst, double imm) {
1463   static const DoubleRepresentation minus_zero(-0.0);
1464   static const DoubleRepresentation zero(0.0);
1465   DoubleRepresentation value_rep(imm);
1466   // Handle special values first.
1467   bool force_load = dst.is(kDoubleRegZero);
1468   if (value_rep == zero && !force_load) {
1469     mov_d(dst, kDoubleRegZero);
1470   } else if (value_rep == minus_zero && !force_load) {
1471     neg_d(dst, kDoubleRegZero);
1472   } else {
1473     uint32_t lo, hi;
1474     DoubleAsTwoUInt32(imm, &lo, &hi);
1475     // Move the low part of the double into the lower word of the
1476     // corresponding FPU register pair.
1477     if (lo != 0) {
1478       li(at, Operand(lo));
1479       mtc1(at, dst);
1480     } else {
1481       mtc1(zero_reg, dst);
1482     }
1483     // Move the high part of the double into the higher word of the
1484     // corresponding FPU register pair.
1485     if (hi != 0) {
1486       li(at, Operand(hi));
1487       Mthc1(at, dst);
1488     } else {
1489       Mthc1(zero_reg, dst);
1490     }
1491   }
1492 }
1493 
1494 
1495 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
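  // Conditional move: rd = rs when rt == 0. movz is unavailable on Loongson
  // and was removed in MIPS32r6, so emulate it with a branch there.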
1496   if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1497     Label done;
1498     Branch(&done, ne, rt, Operand(zero_reg));
1499     mov(rd, rs);
1500     bind(&done);
1501   } else {
1502     movz(rd, rs, rt);
1503   }
1504 }
1505 
1506 
1507 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1508   if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1509     Label done;
1510     Branch(&done, eq, rt, Operand(zero_reg));
1511     mov(rd, rs);
1512     bind(&done);
1513   } else {
1514     movn(rd, rs, rt);
1515   }
1516 }
1517 
1518 
1519 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1520   if (IsMipsArchVariant(kLoongson)) {
1521     // Tests an FP condition code and then conditionally move rs to rd.
1522     // We do not currently use any FPU cc bit other than bit 0.
1523     DCHECK(cc == 0);
1524     DCHECK(!(rs.is(t8) || rd.is(t8)));
1525     Label done;
1526     Register scratch = t8;
1527     // For testing purposes we need to fetch the content of the FCSR register
1528     // and then test its cc (floating point condition code) bit (for cc = 0, it
1529     // is the 24th bit of the FCSR).
1530     cfc1(scratch, FCSR);
1531     // For the MIPS I, II and III architectures, the contents of scratch are
1532     // UNPREDICTABLE for the instruction immediately following CFC1.
1533     nop();
1534     srl(scratch, scratch, 16);
1535     andi(scratch, scratch, 0x0080);
1536     Branch(&done, eq, scratch, Operand(zero_reg));
1537     mov(rd, rs);
1538     bind(&done);
1539   } else {
1540     movt(rd, rs, cc);
1541   }
1542 }
1543 
1544 
1545 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1546   if (IsMipsArchVariant(kLoongson)) {
1547     // Tests an FP condition code and then conditionally move rs to rd.
1548     // We do not currently use any FPU cc bit other than bit 0.
1549     DCHECK(cc == 0);
1550     DCHECK(!(rs.is(t8) || rd.is(t8)));
1551     Label done;
1552     Register scratch = t8;
1553     // For testing purposes we need to fetch the content of the FCSR register
1554     // and then test its cc (floating point condition code) bit (for cc = 0, it
1555     // is the 24th bit of the FCSR).
1556     cfc1(scratch, FCSR);
1557     // For the MIPS I, II and III architectures, the contents of scratch are
1558     // UNPREDICTABLE for the instruction immediately following CFC1.
1559     nop();
1560     srl(scratch, scratch, 16);
1561     andi(scratch, scratch, 0x0080);
1562     Branch(&done, ne, scratch, Operand(zero_reg));
1563     mov(rd, rs);
1564     bind(&done);
1565   } else {
1566     movf(rd, rs, cc);
1567   }
1568 }
1569 
1570 
1571 void MacroAssembler::Clz(Register rd, Register rs) {
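  // Count leading zeros. Loongson lacks clz, so count by shifting a
  // single-bit mask down from bit 31 until it overlaps rs.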
1572   if (IsMipsArchVariant(kLoongson)) {
1573     DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1574     Register mask = t8;
1575     Register scratch = t9;
1576     Label loop, end;
1577     mov(at, rs);
1578     mov(rd, zero_reg);
1579     lui(mask, 0x8000);
1580     bind(&loop);
1581     and_(scratch, at, mask);
1582     Branch(&end, ne, scratch, Operand(zero_reg));
1583     addiu(rd, rd, 1);
1584     Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1585     srl(mask, mask, 1);
1586     bind(&end);
1587   } else {
1588     clz(rd, rs);
1589   }
1590 }
1591 
1592 
1593 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1594                                      Register result,
1595                                      DoubleRegister double_input,
1596                                      Register scratch,
1597                                      DoubleRegister double_scratch,
1598                                      Register except_flag,
1599                                      CheckForInexactConversion check_inexact) {
1600   DCHECK(!result.is(scratch));
1601   DCHECK(!double_input.is(double_scratch));
1602   DCHECK(!except_flag.is(scratch));
1603 
1604   Label done;
1605 
1606   // Clear the except flag (0 = no exception)
1607   mov(except_flag, zero_reg);
1608 
1609   // Test for values that can be exactly represented as a signed 32-bit integer.
1610   cvt_w_d(double_scratch, double_input);
1611   mfc1(result, double_scratch);
1612   cvt_d_w(double_scratch, double_scratch);
1613   BranchF(&done, NULL, eq, double_input, double_scratch);
1614 
1615   int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
1616 
1617   if (check_inexact == kDontCheckForInexactConversion) {
1618     // Ignore inexact exceptions.
1619     except_mask &= ~kFCSRInexactFlagMask;
1620   }
1621 
1622   // Save FCSR.
1623   cfc1(scratch, FCSR);
1624   // Disable FPU exceptions.
1625   ctc1(zero_reg, FCSR);
1626 
1627   // Do operation based on rounding mode.
1628   switch (rounding_mode) {
1629     case kRoundToNearest:
1630       Round_w_d(double_scratch, double_input);
1631       break;
1632     case kRoundToZero:
1633       Trunc_w_d(double_scratch, double_input);
1634       break;
1635     case kRoundToPlusInf:
1636       Ceil_w_d(double_scratch, double_input);
1637       break;
1638     case kRoundToMinusInf:
1639       Floor_w_d(double_scratch, double_input);
1640       break;
1641   }  // End of switch-statement.
1642 
1643   // Retrieve FCSR.
1644   cfc1(except_flag, FCSR);
1645   // Restore FCSR.
1646   ctc1(scratch, FCSR);
1647   // Move the converted value into the result register.
1648   mfc1(result, double_scratch);
1649 
1650   // Check for fpu exceptions.
1651   And(except_flag, except_flag, Operand(except_mask));
1652 
1653   bind(&done);
1654 }
1655 
1656 
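// Fast path used by TruncateDoubleToI and TruncateHeapNumberToI: performs a
// trunc.w.d with the FCSR temporarily cleared and branches to 'done' only if
// no overflow, underflow or invalid-operation flag was raised, i.e. only if
// the double fit into a signed 32-bit integer.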
TryInlineTruncateDoubleToI(Register result,DoubleRegister double_input,Label * done)1657 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1658                                                 DoubleRegister double_input,
1659                                                 Label* done) {
1660   DoubleRegister single_scratch = kLithiumScratchDouble.low();
1661   Register scratch = at;
1662   Register scratch2 = t9;
1663 
1664   // Clear cumulative exception flags and save the FCSR.
1665   cfc1(scratch2, FCSR);
1666   ctc1(zero_reg, FCSR);
1667   // Try a conversion to a signed integer.
1668   trunc_w_d(single_scratch, double_input);
1669   mfc1(result, single_scratch);
1670   // Retrieve and restore the FCSR.
1671   cfc1(scratch, FCSR);
1672   ctc1(scratch2, FCSR);
1673   // Check for overflow and NaNs.
1674   And(scratch,
1675       scratch,
1676       kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1677   // If we had no exceptions we are done.
1678   Branch(done, eq, scratch, Operand(zero_reg));
1679 }
1680 
1681 
TruncateDoubleToI(Register result,DoubleRegister double_input)1682 void MacroAssembler::TruncateDoubleToI(Register result,
1683                                        DoubleRegister double_input) {
1684   Label done;
1685 
1686   TryInlineTruncateDoubleToI(result, double_input, &done);
1687 
1688   // If we fell through then inline version didn't succeed - call stub instead.
1689   push(ra);
1690   Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
1691   sdc1(double_input, MemOperand(sp, 0));
1692 
1693   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1694   CallStub(&stub);
1695 
1696   Addu(sp, sp, Operand(kDoubleSize));
1697   pop(ra);
1698 
1699   bind(&done);
1700 }
1701 
1702 
TruncateHeapNumberToI(Register result,Register object)1703 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1704   Label done;
1705   DoubleRegister double_scratch = f12;
1706   DCHECK(!result.is(object));
1707 
1708   ldc1(double_scratch,
1709        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1710   TryInlineTruncateDoubleToI(result, double_scratch, &done);
1711 
1712   // If we fell through then inline version didn't succeed - call stub instead.
1713   push(ra);
1714   DoubleToIStub stub(isolate(),
1715                      object,
1716                      result,
1717                      HeapNumber::kValueOffset - kHeapObjectTag,
1718                      true,
1719                      true);
1720   CallStub(&stub);
1721   pop(ra);
1722 
1723   bind(&done);
1724 }
1725 
1726 
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch,Label * not_number)1727 void MacroAssembler::TruncateNumberToI(Register object,
1728                                        Register result,
1729                                        Register heap_number_map,
1730                                        Register scratch,
1731                                        Label* not_number) {
1732   Label done;
1733   DCHECK(!result.is(object));
1734 
1735   UntagAndJumpIfSmi(result, object, &done);
1736   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1737   TruncateHeapNumberToI(result, object);
1738 
1739   bind(&done);
1740 }
1741 
1742 
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)1743 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1744                                          Register src,
1745                                          int num_least_bits) {
1746   Ext(dst, src, kSmiTagSize, num_least_bits);
1747 }
1748 
1749 
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)1750 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1751                                            Register src,
1752                                            int num_least_bits) {
1753   And(dst, src, Operand((1 << num_least_bits) - 1));
1754 }
1755 
1756 
1757 // Emulated conditional branches do not emit a nop in the branch delay slot.
1758 //
1759 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
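// An unconditional branch (cc_always) must be given zero_reg for both
// operands, while a conditional branch must be given at least one operand
// that is not zero_reg.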
1760 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
1761     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
1762     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1763 
1764 
Branch(int16_t offset,BranchDelaySlot bdslot)1765 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1766   BranchShort(offset, bdslot);
1767 }
1768 
1769 
Branch(int16_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)1770 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1771                             const Operand& rt,
1772                             BranchDelaySlot bdslot) {
1773   BranchShort(offset, cond, rs, rt, bdslot);
1774 }
1775 
1776 
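// Branching to a label: a bound label that is near enough gets a short
// pc-relative branch, a bound label that is too far away gets an absolute
// jump via Jr. For unbound labels we also fall back to Jr once a trampoline
// pool has been emitted, since the final distance is then no longer
// guaranteed to be short.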
Branch(Label * L,BranchDelaySlot bdslot)1777 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1778   if (L->is_bound()) {
1779     if (is_near(L)) {
1780       BranchShort(L, bdslot);
1781     } else {
1782       Jr(L, bdslot);
1783     }
1784   } else {
1785     if (is_trampoline_emitted()) {
1786       Jr(L, bdslot);
1787     } else {
1788       BranchShort(L, bdslot);
1789     }
1790   }
1791 }
1792 
1793 
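// Conditional branch to a possibly far label. When a short branch cannot be
// used, the branch is synthesized by skipping over an unconditional jump with
// the negated condition:
//   BranchShort(&skip, NegateCondition(cond), rs, rt);
//   Jr(L, bdslot);
//   bind(&skip);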
Branch(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)1794 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1795                             const Operand& rt,
1796                             BranchDelaySlot bdslot) {
1797   if (L->is_bound()) {
1798     if (is_near(L)) {
1799       BranchShort(L, cond, rs, rt, bdslot);
1800     } else {
1801       if (cond != cc_always) {
1802         Label skip;
1803         Condition neg_cond = NegateCondition(cond);
1804         BranchShort(&skip, neg_cond, rs, rt);
1805         Jr(L, bdslot);
1806         bind(&skip);
1807       } else {
1808         Jr(L, bdslot);
1809       }
1810     }
1811   } else {
1812     if (is_trampoline_emitted()) {
1813       if (cond != cc_always) {
1814         Label skip;
1815         Condition neg_cond = NegateCondition(cond);
1816         BranchShort(&skip, neg_cond, rs, rt);
1817         Jr(L, bdslot);
1818         bind(&skip);
1819       } else {
1820         Jr(L, bdslot);
1821       }
1822     } else {
1823       BranchShort(L, cond, rs, rt, bdslot);
1824     }
1825   }
1826 }
1827 
1828 
Branch(Label * L,Condition cond,Register rs,Heap::RootListIndex index,BranchDelaySlot bdslot)1829 void MacroAssembler::Branch(Label* L,
1830                             Condition cond,
1831                             Register rs,
1832                             Heap::RootListIndex index,
1833                             BranchDelaySlot bdslot) {
1834   LoadRoot(at, index);
1835   Branch(L, cond, rs, Operand(at), bdslot);
1836 }
1837 
1838 
BranchShort(int16_t offset,BranchDelaySlot bdslot)1839 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1840   b(offset);
1841 
1842   // Emit a nop in the branch delay slot if required.
1843   if (bdslot == PROTECT)
1844     nop();
1845 }
1846 
1847 
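// Short conditional branch with a 16-bit offset. When rt is an immediate,
// comparisons that fit use slti/sltiu directly; larger immediates are first
// materialized into 'at' with li, so 'at' may be clobbered here.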
BranchShort(int16_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)1848 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1849                                  const Operand& rt,
1850                                  BranchDelaySlot bdslot) {
1851   BRANCH_ARGS_CHECK(cond, rs, rt);
1852   DCHECK(!rs.is(zero_reg));
1853   Register r2 = no_reg;
1854   Register scratch = at;
1855 
1856   if (rt.is_reg()) {
1857     // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1858     // rt.
1859     BlockTrampolinePoolScope block_trampoline_pool(this);
1860     r2 = rt.rm_;
1861     switch (cond) {
1862       case cc_always:
1863         b(offset);
1864         break;
1865       case eq:
1866         beq(rs, r2, offset);
1867         break;
1868       case ne:
1869         bne(rs, r2, offset);
1870         break;
1871       // Signed comparison.
1872       case greater:
1873         if (r2.is(zero_reg)) {
1874           bgtz(rs, offset);
1875         } else {
1876           slt(scratch, r2, rs);
1877           bne(scratch, zero_reg, offset);
1878         }
1879         break;
1880       case greater_equal:
1881         if (r2.is(zero_reg)) {
1882           bgez(rs, offset);
1883         } else {
1884           slt(scratch, rs, r2);
1885           beq(scratch, zero_reg, offset);
1886         }
1887         break;
1888       case less:
1889         if (r2.is(zero_reg)) {
1890           bltz(rs, offset);
1891         } else {
1892           slt(scratch, rs, r2);
1893           bne(scratch, zero_reg, offset);
1894         }
1895         break;
1896       case less_equal:
1897         if (r2.is(zero_reg)) {
1898           blez(rs, offset);
1899         } else {
1900           slt(scratch, r2, rs);
1901           beq(scratch, zero_reg, offset);
1902         }
1903         break;
1904       // Unsigned comparison.
1905       case Ugreater:
1906         if (r2.is(zero_reg)) {
1907           bgtz(rs, offset);
1908         } else {
1909           sltu(scratch, r2, rs);
1910           bne(scratch, zero_reg, offset);
1911         }
1912         break;
1913       case Ugreater_equal:
1914         if (r2.is(zero_reg)) {
1915           bgez(rs, offset);
1916         } else {
1917           sltu(scratch, rs, r2);
1918           beq(scratch, zero_reg, offset);
1919         }
1920         break;
1921       case Uless:
1922         if (r2.is(zero_reg)) {
1923           // Unsigned rs < 0 is never true, so no code needs to be emitted.
1924           return;
1925         } else {
1926           sltu(scratch, rs, r2);
1927           bne(scratch, zero_reg, offset);
1928         }
1929         break;
1930       case Uless_equal:
1931         if (r2.is(zero_reg)) {
1932           b(offset);
1933         } else {
1934           sltu(scratch, r2, rs);
1935           beq(scratch, zero_reg, offset);
1936         }
1937         break;
1938       default:
1939         UNREACHABLE();
1940     }
1941   } else {
1942     // Be careful to always use shifted_branch_offset only just before the
1943   // branch instruction, as the location will be remembered for patching the
1944     // target.
1945     BlockTrampolinePoolScope block_trampoline_pool(this);
1946     switch (cond) {
1947       case cc_always:
1948         b(offset);
1949         break;
1950       case eq:
1951         // We don't want any other register but scratch clobbered.
1952         DCHECK(!scratch.is(rs));
1953         r2 = scratch;
1954         li(r2, rt);
1955         beq(rs, r2, offset);
1956         break;
1957       case ne:
1958         // We don't want any other register but scratch clobbered.
1959         DCHECK(!scratch.is(rs));
1960         r2 = scratch;
1961         li(r2, rt);
1962         bne(rs, r2, offset);
1963         break;
1964       // Signed comparison.
1965       case greater:
1966         if (rt.imm32_ == 0) {
1967           bgtz(rs, offset);
1968         } else {
1969           r2 = scratch;
1970           li(r2, rt);
1971           slt(scratch, r2, rs);
1972           bne(scratch, zero_reg, offset);
1973         }
1974         break;
1975       case greater_equal:
1976         if (rt.imm32_ == 0) {
1977           bgez(rs, offset);
1978         } else if (is_int16(rt.imm32_)) {
1979           slti(scratch, rs, rt.imm32_);
1980           beq(scratch, zero_reg, offset);
1981         } else {
1982           r2 = scratch;
1983           li(r2, rt);
1984           slt(scratch, rs, r2);
1985           beq(scratch, zero_reg, offset);
1986         }
1987         break;
1988       case less:
1989         if (rt.imm32_ == 0) {
1990           bltz(rs, offset);
1991         } else if (is_int16(rt.imm32_)) {
1992           slti(scratch, rs, rt.imm32_);
1993           bne(scratch, zero_reg, offset);
1994         } else {
1995           r2 = scratch;
1996           li(r2, rt);
1997           slt(scratch, rs, r2);
1998           bne(scratch, zero_reg, offset);
1999         }
2000         break;
2001       case less_equal:
2002         if (rt.imm32_ == 0) {
2003           blez(rs, offset);
2004         } else {
2005           r2 = scratch;
2006           li(r2, rt);
2007           slt(scratch, r2, rs);
2008           beq(scratch, zero_reg, offset);
2009         }
2010         break;
2011       // Unsigned comparison.
2012       case Ugreater:
2013         if (rt.imm32_ == 0) {
2014           bgtz(rs, offset);
2015         } else {
2016           r2 = scratch;
2017           li(r2, rt);
2018           sltu(scratch, r2, rs);
2019           bne(scratch, zero_reg, offset);
2020         }
2021         break;
2022       case Ugreater_equal:
2023         if (rt.imm32_ == 0) {
2024           bgez(rs, offset);
2025         } else if (is_int16(rt.imm32_)) {
2026           sltiu(scratch, rs, rt.imm32_);
2027           beq(scratch, zero_reg, offset);
2028         } else {
2029           r2 = scratch;
2030           li(r2, rt);
2031           sltu(scratch, rs, r2);
2032           beq(scratch, zero_reg, offset);
2033         }
2034         break;
2035       case Uless:
2036         if (rt.imm32_ == 0) {
2037           // Unsigned rs < 0 is never true, so no code needs to be emitted.
2038           return;
2039         } else if (is_int16(rt.imm32_)) {
2040           sltiu(scratch, rs, rt.imm32_);
2041           bne(scratch, zero_reg, offset);
2042         } else {
2043           r2 = scratch;
2044           li(r2, rt);
2045           sltu(scratch, rs, r2);
2046           bne(scratch, zero_reg, offset);
2047         }
2048         break;
2049       case Uless_equal:
2050         if (rt.imm32_ == 0) {
2051           b(offset);
2052         } else {
2053           r2 = scratch;
2054           li(r2, rt);
2055           sltu(scratch, r2, rs);
2056           beq(scratch, zero_reg, offset);
2057         }
2058         break;
2059       default:
2060         UNREACHABLE();
2061     }
2062   }
2063   // Emit a nop in the branch delay slot if required.
2064   if (bdslot == PROTECT)
2065     nop();
2066 }
2067 
2068 
BranchShort(Label * L,BranchDelaySlot bdslot)2069 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2070   // We use shifted_branch_offset as an argument for the branch instructions
2071   // so that it is computed just before the branch instruction is generated.
2072 
2073   b(shifted_branch_offset(L, false));
2074 
2075   // Emit a nop in the branch delay slot if required.
2076   if (bdslot == PROTECT)
2077     nop();
2078 }
2079 
2080 
BranchShort(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2081 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2082                                  const Operand& rt,
2083                                  BranchDelaySlot bdslot) {
2084   BRANCH_ARGS_CHECK(cond, rs, rt);
2085 
2086   int32_t offset = 0;
2087   Register r2 = no_reg;
2088   Register scratch = at;
2089   if (rt.is_reg()) {
2090     BlockTrampolinePoolScope block_trampoline_pool(this);
2091     r2 = rt.rm_;
2092     // Be careful to always use shifted_branch_offset only just before the
2093   // branch instruction, as the location will be remembered for patching the
2094     // target.
2095     switch (cond) {
2096       case cc_always:
2097         offset = shifted_branch_offset(L, false);
2098         b(offset);
2099         break;
2100       case eq:
2101         offset = shifted_branch_offset(L, false);
2102         beq(rs, r2, offset);
2103         break;
2104       case ne:
2105         offset = shifted_branch_offset(L, false);
2106         bne(rs, r2, offset);
2107         break;
2108       // Signed comparison.
2109       case greater:
2110         if (r2.is(zero_reg)) {
2111           offset = shifted_branch_offset(L, false);
2112           bgtz(rs, offset);
2113         } else {
2114           slt(scratch, r2, rs);
2115           offset = shifted_branch_offset(L, false);
2116           bne(scratch, zero_reg, offset);
2117         }
2118         break;
2119       case greater_equal:
2120         if (r2.is(zero_reg)) {
2121           offset = shifted_branch_offset(L, false);
2122           bgez(rs, offset);
2123         } else {
2124           slt(scratch, rs, r2);
2125           offset = shifted_branch_offset(L, false);
2126           beq(scratch, zero_reg, offset);
2127         }
2128         break;
2129       case less:
2130         if (r2.is(zero_reg)) {
2131           offset = shifted_branch_offset(L, false);
2132           bltz(rs, offset);
2133         } else {
2134           slt(scratch, rs, r2);
2135           offset = shifted_branch_offset(L, false);
2136           bne(scratch, zero_reg, offset);
2137         }
2138         break;
2139       case less_equal:
2140         if (r2.is(zero_reg)) {
2141           offset = shifted_branch_offset(L, false);
2142           blez(rs, offset);
2143         } else {
2144           slt(scratch, r2, rs);
2145           offset = shifted_branch_offset(L, false);
2146           beq(scratch, zero_reg, offset);
2147         }
2148         break;
2149       // Unsigned comparison.
2150       case Ugreater:
2151         if (r2.is(zero_reg)) {
2152           offset = shifted_branch_offset(L, false);
2153           bgtz(rs, offset);
2154         } else {
2155           sltu(scratch, r2, rs);
2156           offset = shifted_branch_offset(L, false);
2157           bne(scratch, zero_reg, offset);
2158         }
2159         break;
2160       case Ugreater_equal:
2161         if (r2.is(zero_reg)) {
2162           offset = shifted_branch_offset(L, false);
2163           bgez(rs, offset);
2164         } else {
2165           sltu(scratch, rs, r2);
2166           offset = shifted_branch_offset(L, false);
2167           beq(scratch, zero_reg, offset);
2168         }
2169         break;
2170       case Uless:
2171         if (r2.is(zero_reg)) {
2172           // Unsigned rs < 0 is never true, so no code needs to be emitted.
2173           return;
2174         } else {
2175           sltu(scratch, rs, r2);
2176           offset = shifted_branch_offset(L, false);
2177           bne(scratch, zero_reg, offset);
2178         }
2179         break;
2180       case Uless_equal:
2181         if (r2.is(zero_reg)) {
2182           offset = shifted_branch_offset(L, false);
2183           b(offset);
2184         } else {
2185           sltu(scratch, r2, rs);
2186           offset = shifted_branch_offset(L, false);
2187           beq(scratch, zero_reg, offset);
2188         }
2189         break;
2190       default:
2191         UNREACHABLE();
2192     }
2193   } else {
2194     // Be careful to always use shifted_branch_offset only just before the
2195   // branch instruction, as the location will be remembered for patching the
2196     // target.
2197     BlockTrampolinePoolScope block_trampoline_pool(this);
2198     switch (cond) {
2199       case cc_always:
2200         offset = shifted_branch_offset(L, false);
2201         b(offset);
2202         break;
2203       case eq:
2204         DCHECK(!scratch.is(rs));
2205         r2 = scratch;
2206         li(r2, rt);
2207         offset = shifted_branch_offset(L, false);
2208         beq(rs, r2, offset);
2209         break;
2210       case ne:
2211         DCHECK(!scratch.is(rs));
2212         r2 = scratch;
2213         li(r2, rt);
2214         offset = shifted_branch_offset(L, false);
2215         bne(rs, r2, offset);
2216         break;
2217       // Signed comparison.
2218       case greater:
2219         if (rt.imm32_ == 0) {
2220           offset = shifted_branch_offset(L, false);
2221           bgtz(rs, offset);
2222         } else {
2223           DCHECK(!scratch.is(rs));
2224           r2 = scratch;
2225           li(r2, rt);
2226           slt(scratch, r2, rs);
2227           offset = shifted_branch_offset(L, false);
2228           bne(scratch, zero_reg, offset);
2229         }
2230         break;
2231       case greater_equal:
2232         if (rt.imm32_ == 0) {
2233           offset = shifted_branch_offset(L, false);
2234           bgez(rs, offset);
2235         } else if (is_int16(rt.imm32_)) {
2236           slti(scratch, rs, rt.imm32_);
2237           offset = shifted_branch_offset(L, false);
2238           beq(scratch, zero_reg, offset);
2239         } else {
2240           DCHECK(!scratch.is(rs));
2241           r2 = scratch;
2242           li(r2, rt);
2243           slt(scratch, rs, r2);
2244           offset = shifted_branch_offset(L, false);
2245           beq(scratch, zero_reg, offset);
2246         }
2247         break;
2248       case less:
2249         if (rt.imm32_ == 0) {
2250           offset = shifted_branch_offset(L, false);
2251           bltz(rs, offset);
2252         } else if (is_int16(rt.imm32_)) {
2253           slti(scratch, rs, rt.imm32_);
2254           offset = shifted_branch_offset(L, false);
2255           bne(scratch, zero_reg, offset);
2256         } else {
2257           DCHECK(!scratch.is(rs));
2258           r2 = scratch;
2259           li(r2, rt);
2260           slt(scratch, rs, r2);
2261           offset = shifted_branch_offset(L, false);
2262           bne(scratch, zero_reg, offset);
2263         }
2264         break;
2265       case less_equal:
2266         if (rt.imm32_ == 0) {
2267           offset = shifted_branch_offset(L, false);
2268           blez(rs, offset);
2269         } else {
2270           DCHECK(!scratch.is(rs));
2271           r2 = scratch;
2272           li(r2, rt);
2273           slt(scratch, r2, rs);
2274           offset = shifted_branch_offset(L, false);
2275           beq(scratch, zero_reg, offset);
2276         }
2277         break;
2278       // Unsigned comparison.
2279       case Ugreater:
2280         if (rt.imm32_ == 0) {
2281           offset = shifted_branch_offset(L, false);
2282           bne(rs, zero_reg, offset);
2283         } else {
2284           DCHECK(!scratch.is(rs));
2285           r2 = scratch;
2286           li(r2, rt);
2287           sltu(scratch, r2, rs);
2288           offset = shifted_branch_offset(L, false);
2289           bne(scratch, zero_reg, offset);
2290         }
2291         break;
2292       case Ugreater_equal:
2293         if (rt.imm32_ == 0) {
2294           offset = shifted_branch_offset(L, false);
2295           bgez(rs, offset);
2296         } else if (is_int16(rt.imm32_)) {
2297           sltiu(scratch, rs, rt.imm32_);
2298           offset = shifted_branch_offset(L, false);
2299           beq(scratch, zero_reg, offset);
2300         } else {
2301           DCHECK(!scratch.is(rs));
2302           r2 = scratch;
2303           li(r2, rt);
2304           sltu(scratch, rs, r2);
2305           offset = shifted_branch_offset(L, false);
2306           beq(scratch, zero_reg, offset);
2307         }
2308         break;
2309       case Uless:
2310         if (rt.imm32_ == 0) {
2311           // Unsigned rs < 0 is never true, so no code needs to be emitted.
2312           return;
2313         } else if (is_int16(rt.imm32_)) {
2314           sltiu(scratch, rs, rt.imm32_);
2315           offset = shifted_branch_offset(L, false);
2316           bne(scratch, zero_reg, offset);
2317         } else {
2318           DCHECK(!scratch.is(rs));
2319           r2 = scratch;
2320           li(r2, rt);
2321           sltu(scratch, rs, r2);
2322           offset = shifted_branch_offset(L, false);
2323           bne(scratch, zero_reg, offset);
2324         }
2325         break;
2326       case Uless_equal:
2327         if (rt.imm32_ == 0) {
2328           offset = shifted_branch_offset(L, false);
2329           beq(rs, zero_reg, offset);
2330         } else {
2331           DCHECK(!scratch.is(rs));
2332           r2 = scratch;
2333           li(r2, rt);
2334           sltu(scratch, r2, rs);
2335           offset = shifted_branch_offset(L, false);
2336           beq(scratch, zero_reg, offset);
2337         }
2338         break;
2339       default:
2340         UNREACHABLE();
2341     }
2342   }
2343   // Check that the offset actually fits in an int16_t.
2344   DCHECK(is_int16(offset));
2345   // Emit a nop in the branch delay slot if required.
2346   if (bdslot == PROTECT)
2347     nop();
2348 }
2349 
2350 
BranchAndLink(int16_t offset,BranchDelaySlot bdslot)2351 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2352   BranchAndLinkShort(offset, bdslot);
2353 }
2354 
2355 
BranchAndLink(int16_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2356 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2357                                    const Operand& rt,
2358                                    BranchDelaySlot bdslot) {
2359   BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2360 }
2361 
2362 
BranchAndLink(Label * L,BranchDelaySlot bdslot)2363 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2364   if (L->is_bound()) {
2365     if (is_near(L)) {
2366       BranchAndLinkShort(L, bdslot);
2367     } else {
2368       Jalr(L, bdslot);
2369     }
2370   } else {
2371     if (is_trampoline_emitted()) {
2372       Jalr(L, bdslot);
2373     } else {
2374       BranchAndLinkShort(L, bdslot);
2375     }
2376   }
2377 }
2378 
2379 
BranchAndLink(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2380 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2381                                    const Operand& rt,
2382                                    BranchDelaySlot bdslot) {
2383   if (L->is_bound()) {
2384     if (is_near(L)) {
2385       BranchAndLinkShort(L, cond, rs, rt, bdslot);
2386     } else {
2387       Label skip;
2388       Condition neg_cond = NegateCondition(cond);
2389       BranchShort(&skip, neg_cond, rs, rt);
2390       Jalr(L, bdslot);
2391       bind(&skip);
2392     }
2393   } else {
2394     if (is_trampoline_emitted()) {
2395       Label skip;
2396       Condition neg_cond = NegateCondition(cond);
2397       BranchShort(&skip, neg_cond, rs, rt);
2398       Jalr(L, bdslot);
2399       bind(&skip);
2400     } else {
2401       BranchAndLinkShort(L, cond, rs, rt, bdslot);
2402     }
2403   }
2404 }
2405 
2406 
2407 // We need to use a bgezal or bltzal, but they can't be used directly with the
2408 // slt instructions. We could use sub or add instead but we would miss overflow
2409 // cases, so we keep slt and add an intermediate third instruction.
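// For example, with this scheme a 'greater' link-branch in the conditional
// variant further below is emitted as:
//   slt(scratch, r2, rs);         // scratch = (rs > rt) ? 1 : 0.
//   addiu(scratch, scratch, -1);  // scratch = 0 if rs > rt, else -1.
//   bgezal(scratch, offset);      // Branches iff scratch >= 0, i.e. == 0 here.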
BranchAndLinkShort(int16_t offset,BranchDelaySlot bdslot)2410 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2411                                         BranchDelaySlot bdslot) {
2412   bal(offset);
2413 
2414   // Emit a nop in the branch delay slot if required.
2415   if (bdslot == PROTECT)
2416     nop();
2417 }
2418 
2419 
BranchAndLinkShort(int16_t offset,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2420 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2421                                         Register rs, const Operand& rt,
2422                                         BranchDelaySlot bdslot) {
2423   BRANCH_ARGS_CHECK(cond, rs, rt);
2424   Register r2 = no_reg;
2425   Register scratch = at;
2426 
2427   if (rt.is_reg()) {
2428     r2 = rt.rm_;
2429   } else if (cond != cc_always) {
2430     r2 = scratch;
2431     li(r2, rt);
2432   }
2433 
2434   if (!IsMipsArchVariant(kMips32r6)) {
2435     BlockTrampolinePoolScope block_trampoline_pool(this);
2436     switch (cond) {
2437       case cc_always:
2438         bal(offset);
2439         break;
2440       case eq:
2441         bne(rs, r2, 2);
2442         nop();
2443         bal(offset);
2444         break;
2445       case ne:
2446         beq(rs, r2, 2);
2447         nop();
2448         bal(offset);
2449         break;
2450 
2451       // Signed comparison.
2452       case greater:
2453         slt(scratch, r2, rs);
2454         addiu(scratch, scratch, -1);
2455         bgezal(scratch, offset);
2456         break;
2457       case greater_equal:
2458         slt(scratch, rs, r2);
2459         addiu(scratch, scratch, -1);
2460         bltzal(scratch, offset);
2461         break;
2462       case less:
2463         slt(scratch, rs, r2);
2464         addiu(scratch, scratch, -1);
2465         bgezal(scratch, offset);
2466         break;
2467       case less_equal:
2468         slt(scratch, r2, rs);
2469         addiu(scratch, scratch, -1);
2470         bltzal(scratch, offset);
2471         break;
2472 
2473       // Unsigned comparison.
2474       case Ugreater:
2475         sltu(scratch, r2, rs);
2476         addiu(scratch, scratch, -1);
2477         bgezal(scratch, offset);
2478         break;
2479       case Ugreater_equal:
2480         sltu(scratch, rs, r2);
2481         addiu(scratch, scratch, -1);
2482         bltzal(scratch, offset);
2483         break;
2484       case Uless:
2485         sltu(scratch, rs, r2);
2486         addiu(scratch, scratch, -1);
2487         bgezal(scratch, offset);
2488         break;
2489       case Uless_equal:
2490         sltu(scratch, r2, rs);
2491         addiu(scratch, scratch, -1);
2492         bltzal(scratch, offset);
2493         break;
2494 
2495       default:
2496         UNREACHABLE();
2497     }
2498   } else {
2499     BlockTrampolinePoolScope block_trampoline_pool(this);
2500     switch (cond) {
2501       case cc_always:
2502         bal(offset);
2503         break;
2504       case eq:
2505         bne(rs, r2, 2);
2506         nop();
2507         bal(offset);
2508         break;
2509       case ne:
2510         beq(rs, r2, 2);
2511         nop();
2512         bal(offset);
2513         break;
2514 
2515       // Signed comparison.
2516       case greater:
2517         // rs > rt
2518         slt(scratch, r2, rs);
2519         beq(scratch, zero_reg, 2);
2520         nop();
2521         bal(offset);
2522         break;
2523       case greater_equal:
2524         // rs >= rt
2525         slt(scratch, rs, r2);
2526         bne(scratch, zero_reg, 2);
2527         nop();
2528         bal(offset);
2529         break;
2530       case less:
2531         // rs < r2
2532         slt(scratch, rs, r2);
2533         bne(scratch, zero_reg, 2);
2534         nop();
2535         bal(offset);
2536         break;
2537       case less_equal:
2538         // rs <= r2
2539         slt(scratch, r2, rs);
2540         bne(scratch, zero_reg, 2);
2541         nop();
2542         bal(offset);
2543         break;
2544 
2545 
2546       // Unsigned comparison.
2547       case Ugreater:
2548         // rs > rt
2549         sltu(scratch, r2, rs);
2550         beq(scratch, zero_reg, 2);
2551         nop();
2552         bal(offset);
2553         break;
2554       case Ugreater_equal:
2555         // rs >= rt
2556         sltu(scratch, rs, r2);
2557         bne(scratch, zero_reg, 2);
2558         nop();
2559         bal(offset);
2560         break;
2561       case Uless:
2562         // rs < r2
2563         sltu(scratch, rs, r2);
2564         bne(scratch, zero_reg, 2);
2565         nop();
2566         bal(offset);
2567         break;
2568       case Uless_equal:
2569         // rs <= r2
2570         sltu(scratch, r2, rs);
2571         bne(scratch, zero_reg, 2);
2572         nop();
2573         bal(offset);
2574         break;
2575       default:
2576         UNREACHABLE();
2577     }
2578   }
2579 
2580   // Emit a nop in the branch delay slot if required.
2581   if (bdslot == PROTECT)
2582     nop();
2583 }
2584 
2585 
BranchAndLinkShort(Label * L,BranchDelaySlot bdslot)2586 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2587   bal(shifted_branch_offset(L, false));
2588 
2589   // Emit a nop in the branch delay slot if required.
2590   if (bdslot == PROTECT)
2591     nop();
2592 }
2593 
2594 
BranchAndLinkShort(Label * L,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bdslot)2595 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2596                                         const Operand& rt,
2597                                         BranchDelaySlot bdslot) {
2598   BRANCH_ARGS_CHECK(cond, rs, rt);
2599 
2600   int32_t offset = 0;
2601   Register r2 = no_reg;
2602   Register scratch = at;
2603   if (rt.is_reg()) {
2604     r2 = rt.rm_;
2605   } else if (cond != cc_always) {
2606     r2 = scratch;
2607     li(r2, rt);
2608   }
2609 
2610   if (!IsMipsArchVariant(kMips32r6)) {
2611     BlockTrampolinePoolScope block_trampoline_pool(this);
2612     switch (cond) {
2613       case cc_always:
2614         offset = shifted_branch_offset(L, false);
2615         bal(offset);
2616         break;
2617       case eq:
2618         bne(rs, r2, 2);
2619         nop();
2620         offset = shifted_branch_offset(L, false);
2621         bal(offset);
2622         break;
2623       case ne:
2624         beq(rs, r2, 2);
2625         nop();
2626         offset = shifted_branch_offset(L, false);
2627         bal(offset);
2628         break;
2629 
2630       // Signed comparison.
2631       case greater:
2632         slt(scratch, r2, rs);
2633         addiu(scratch, scratch, -1);
2634         offset = shifted_branch_offset(L, false);
2635         bgezal(scratch, offset);
2636         break;
2637       case greater_equal:
2638         slt(scratch, rs, r2);
2639         addiu(scratch, scratch, -1);
2640         offset = shifted_branch_offset(L, false);
2641         bltzal(scratch, offset);
2642         break;
2643       case less:
2644         slt(scratch, rs, r2);
2645         addiu(scratch, scratch, -1);
2646         offset = shifted_branch_offset(L, false);
2647         bgezal(scratch, offset);
2648         break;
2649       case less_equal:
2650         slt(scratch, r2, rs);
2651         addiu(scratch, scratch, -1);
2652         offset = shifted_branch_offset(L, false);
2653         bltzal(scratch, offset);
2654         break;
2655 
2656       // Unsigned comparison.
2657       case Ugreater:
2658         sltu(scratch, r2, rs);
2659         addiu(scratch, scratch, -1);
2660         offset = shifted_branch_offset(L, false);
2661         bgezal(scratch, offset);
2662         break;
2663       case Ugreater_equal:
2664         sltu(scratch, rs, r2);
2665         addiu(scratch, scratch, -1);
2666         offset = shifted_branch_offset(L, false);
2667         bltzal(scratch, offset);
2668         break;
2669       case Uless:
2670         sltu(scratch, rs, r2);
2671         addiu(scratch, scratch, -1);
2672         offset = shifted_branch_offset(L, false);
2673         bgezal(scratch, offset);
2674         break;
2675       case Uless_equal:
2676         sltu(scratch, r2, rs);
2677         addiu(scratch, scratch, -1);
2678         offset = shifted_branch_offset(L, false);
2679         bltzal(scratch, offset);
2680         break;
2681 
2682       default:
2683         UNREACHABLE();
2684     }
2685   } else {
2686     BlockTrampolinePoolScope block_trampoline_pool(this);
2687     switch (cond) {
2688       case cc_always:
2689         offset = shifted_branch_offset(L, false);
2690         bal(offset);
2691         break;
2692       case eq:
2693         bne(rs, r2, 2);
2694         nop();
2695         offset = shifted_branch_offset(L, false);
2696         bal(offset);
2697         break;
2698       case ne:
2699         beq(rs, r2, 2);
2700         nop();
2701         offset = shifted_branch_offset(L, false);
2702         bal(offset);
2703         break;
2704 
2705       // Signed comparison.
2706       case greater:
2707         // rs > rt
2708         slt(scratch, r2, rs);
2709         beq(scratch, zero_reg, 2);
2710         nop();
2711         offset = shifted_branch_offset(L, false);
2712         bal(offset);
2713         break;
2714       case greater_equal:
2715         // rs >= rt
2716         slt(scratch, rs, r2);
2717         bne(scratch, zero_reg, 2);
2718         nop();
2719         offset = shifted_branch_offset(L, false);
2720         bal(offset);
2721         break;
2722       case less:
2723         // rs < r2
2724         slt(scratch, rs, r2);
2725         bne(scratch, zero_reg, 2);
2726         nop();
2727         offset = shifted_branch_offset(L, false);
2728         bal(offset);
2729         break;
2730       case less_equal:
2731         // rs <= r2
2732         slt(scratch, r2, rs);
2733         bne(scratch, zero_reg, 2);
2734         nop();
2735         offset = shifted_branch_offset(L, false);
2736         bal(offset);
2737         break;
2738 
2739 
2740       // Unsigned comparison.
2741       case Ugreater:
2742         // rs > rt
2743         sltu(scratch, r2, rs);
2744         beq(scratch, zero_reg, 2);
2745         nop();
2746         offset = shifted_branch_offset(L, false);
2747         bal(offset);
2748         break;
2749       case Ugreater_equal:
2750         // rs >= rt
2751         sltu(scratch, rs, r2);
2752         bne(scratch, zero_reg, 2);
2753         nop();
2754         offset = shifted_branch_offset(L, false);
2755         bal(offset);
2756         break;
2757       case Uless:
2758         // rs < r2
2759         sltu(scratch, rs, r2);
2760         bne(scratch, zero_reg, 2);
2761         nop();
2762         offset = shifted_branch_offset(L, false);
2763         bal(offset);
2764         break;
2765       case Uless_equal:
2766         // rs <= r2
2767         sltu(scratch, r2, rs);
2768         bne(scratch, zero_reg, 2);
2769         nop();
2770         offset = shifted_branch_offset(L, false);
2771         bal(offset);
2772         break;
2773 
2774       default:
2775         UNREACHABLE();
2776     }
2777   }
2778 
2779   // Check that the offset actually fits in an int16_t.
2780   DCHECK(is_int16(offset));
2781 
2782   // Emit a nop in the branch delay slot if required.
2783   if (bdslot == PROTECT)
2784     nop();
2785 }
2786 
2787 
Jump(Register target,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2788 void MacroAssembler::Jump(Register target,
2789                           Condition cond,
2790                           Register rs,
2791                           const Operand& rt,
2792                           BranchDelaySlot bd) {
2793   BlockTrampolinePoolScope block_trampoline_pool(this);
2794   if (cond == cc_always) {
2795     jr(target);
2796   } else {
2797     BRANCH_ARGS_CHECK(cond, rs, rt);
2798     Branch(2, NegateCondition(cond), rs, rt);
2799     jr(target);
2800   }
2801   // Emit a nop in the branch delay slot if required.
2802   if (bd == PROTECT)
2803     nop();
2804 }
2805 
2806 
Jump(intptr_t target,RelocInfo::Mode rmode,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2807 void MacroAssembler::Jump(intptr_t target,
2808                           RelocInfo::Mode rmode,
2809                           Condition cond,
2810                           Register rs,
2811                           const Operand& rt,
2812                           BranchDelaySlot bd) {
2813   Label skip;
2814   if (cond != cc_always) {
2815     Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2816   }
2817   // The first instruction of 'li' may be placed in the delay slot.
2818   // This is not an issue; t9 is expected to be clobbered anyway.
2819   li(t9, Operand(target, rmode));
2820   Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2821   bind(&skip);
2822 }
2823 
2824 
Jump(Address target,RelocInfo::Mode rmode,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2825 void MacroAssembler::Jump(Address target,
2826                           RelocInfo::Mode rmode,
2827                           Condition cond,
2828                           Register rs,
2829                           const Operand& rt,
2830                           BranchDelaySlot bd) {
2831   DCHECK(!RelocInfo::IsCodeTarget(rmode));
2832   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2833 }
2834 
2835 
Jump(Handle<Code> code,RelocInfo::Mode rmode,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2836 void MacroAssembler::Jump(Handle<Code> code,
2837                           RelocInfo::Mode rmode,
2838                           Condition cond,
2839                           Register rs,
2840                           const Operand& rt,
2841                           BranchDelaySlot bd) {
2842   DCHECK(RelocInfo::IsCodeTarget(rmode));
2843   AllowDeferredHandleDereference embedding_raw_address;
2844   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2845 }
2846 
2847 
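// CallSize accounts for the code emitted by Call(Register, ...): one
// instruction for an unconditional jalr, three for the conditional form
// (negated branch, delay-slot nop, jalr), plus one more when a PROTECT
// branch delay slot is requested.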
CallSize(Register target,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2848 int MacroAssembler::CallSize(Register target,
2849                              Condition cond,
2850                              Register rs,
2851                              const Operand& rt,
2852                              BranchDelaySlot bd) {
2853   int size = 0;
2854 
2855   if (cond == cc_always) {
2856     size += 1;
2857   } else {
2858     size += 3;
2859   }
2860 
2861   if (bd == PROTECT)
2862     size += 1;
2863 
2864   return size * kInstrSize;
2865 }
2866 
2867 
2868 // Note: To call gcc-compiled C code on mips, you must call through t9.
Call(Register target,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2869 void MacroAssembler::Call(Register target,
2870                           Condition cond,
2871                           Register rs,
2872                           const Operand& rt,
2873                           BranchDelaySlot bd) {
2874   BlockTrampolinePoolScope block_trampoline_pool(this);
2875   Label start;
2876   bind(&start);
2877   if (cond == cc_always) {
2878     jalr(target);
2879   } else {
2880     BRANCH_ARGS_CHECK(cond, rs, rt);
2881     Branch(2, NegateCondition(cond), rs, rt);
2882     jalr(target);
2883   }
2884   // Emit a nop in the branch delay slot if required.
2885   if (bd == PROTECT)
2886     nop();
2887 
2888   DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2889             SizeOfCodeGeneratedSince(&start));
2890 }
2891 
2892 
CallSize(Address target,RelocInfo::Mode rmode,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2893 int MacroAssembler::CallSize(Address target,
2894                              RelocInfo::Mode rmode,
2895                              Condition cond,
2896                              Register rs,
2897                              const Operand& rt,
2898                              BranchDelaySlot bd) {
2899   int size = CallSize(t9, cond, rs, rt, bd);
2900   return size + 2 * kInstrSize;
2901 }
2902 
2903 
Call(Address target,RelocInfo::Mode rmode,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2904 void MacroAssembler::Call(Address target,
2905                           RelocInfo::Mode rmode,
2906                           Condition cond,
2907                           Register rs,
2908                           const Operand& rt,
2909                           BranchDelaySlot bd) {
2910   BlockTrampolinePoolScope block_trampoline_pool(this);
2911   Label start;
2912   bind(&start);
2913   int32_t target_int = reinterpret_cast<int32_t>(target);
2914   // Must record previous source positions before the
2915   // li() generates a new code target.
2916   positions_recorder()->WriteRecordedPositions();
2917   li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2918   Call(t9, cond, rs, rt, bd);
2919   DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2920             SizeOfCodeGeneratedSince(&start));
2921 }
2922 
2923 
CallSize(Handle<Code> code,RelocInfo::Mode rmode,TypeFeedbackId ast_id,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2924 int MacroAssembler::CallSize(Handle<Code> code,
2925                              RelocInfo::Mode rmode,
2926                              TypeFeedbackId ast_id,
2927                              Condition cond,
2928                              Register rs,
2929                              const Operand& rt,
2930                              BranchDelaySlot bd) {
2931   AllowDeferredHandleDereference using_raw_address;
2932   return CallSize(reinterpret_cast<Address>(code.location()),
2933       rmode, cond, rs, rt, bd);
2934 }
2935 
2936 
Call(Handle<Code> code,RelocInfo::Mode rmode,TypeFeedbackId ast_id,Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2937 void MacroAssembler::Call(Handle<Code> code,
2938                           RelocInfo::Mode rmode,
2939                           TypeFeedbackId ast_id,
2940                           Condition cond,
2941                           Register rs,
2942                           const Operand& rt,
2943                           BranchDelaySlot bd) {
2944   BlockTrampolinePoolScope block_trampoline_pool(this);
2945   Label start;
2946   bind(&start);
2947   DCHECK(RelocInfo::IsCodeTarget(rmode));
2948   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2949     SetRecordedAstId(ast_id);
2950     rmode = RelocInfo::CODE_TARGET_WITH_ID;
2951   }
2952   AllowDeferredHandleDereference embedding_raw_address;
2953   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2954   DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2955             SizeOfCodeGeneratedSince(&start));
2956 }
2957 
2958 
Ret(Condition cond,Register rs,const Operand & rt,BranchDelaySlot bd)2959 void MacroAssembler::Ret(Condition cond,
2960                          Register rs,
2961                          const Operand& rt,
2962                          BranchDelaySlot bd) {
2963   Jump(ra, cond, rs, rt, bd);
2964 }
2965 
2966 
J(Label * L,BranchDelaySlot bdslot)2967 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2968   BlockTrampolinePoolScope block_trampoline_pool(this);
2969 
2970   uint32_t imm28;
2971   imm28 = jump_address(L);
2972   imm28 &= kImm28Mask;
2973   { BlockGrowBufferScope block_buf_growth(this);
2974     // Buffer growth (and relocation) must be blocked for internal references
2975     // until associated instructions are emitted and available to be patched.
2976     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2977     j(imm28);
2978   }
2979   // Emit a nop in the branch delay slot if required.
2980   if (bdslot == PROTECT)
2981     nop();
2982 }
2983 
2984 
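// Jr (and Jalr below) load the full 32-bit target address into 'at' with a
// lui/ori pair; buffer growth is blocked so the pair stays adjacent and can
// be patched as an internal reference.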
Jr(Label * L,BranchDelaySlot bdslot)2985 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2986   BlockTrampolinePoolScope block_trampoline_pool(this);
2987 
2988   uint32_t imm32;
2989   imm32 = jump_address(L);
2990   { BlockGrowBufferScope block_buf_growth(this);
2991     // Buffer growth (and relocation) must be blocked for internal references
2992     // until associated instructions are emitted and available to be patched.
2993     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2994     lui(at, (imm32 & kHiMask) >> kLuiShift);
2995     ori(at, at, (imm32 & kImm16Mask));
2996   }
2997   jr(at);
2998 
2999   // Emit a nop in the branch delay slot if required.
3000   if (bdslot == PROTECT)
3001     nop();
3002 }
3003 
3004 
Jalr(Label * L,BranchDelaySlot bdslot)3005 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3006   BlockTrampolinePoolScope block_trampoline_pool(this);
3007 
3008   uint32_t imm32;
3009   imm32 = jump_address(L);
3010   { BlockGrowBufferScope block_buf_growth(this);
3011     // Buffer growth (and relocation) must be blocked for internal references
3012     // until associated instructions are emitted and available to be patched.
3013     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3014     lui(at, (imm32 & kHiMask) >> kLuiShift);
3015     ori(at, at, (imm32 & kImm16Mask));
3016   }
3017   jalr(at);
3018 
3019   // Emit a nop in the branch delay slot if required.
3020   if (bdslot == PROTECT)
3021     nop();
3022 }
3023 
3024 
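// Ret(USE_DELAY_SLOT) omits the trailing nop, so the addiu that pops the
// arguments executes in the delay slot of the return jump, before control
// reaches the caller.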
DropAndRet(int drop)3025 void MacroAssembler::DropAndRet(int drop) {
3026   Ret(USE_DELAY_SLOT);
3027   addiu(sp, sp, drop * kPointerSize);
3028 }
3029 
DropAndRet(int drop,Condition cond,Register r1,const Operand & r2)3030 void MacroAssembler::DropAndRet(int drop,
3031                                 Condition cond,
3032                                 Register r1,
3033                                 const Operand& r2) {
3034   // Both Drop and Ret need to be conditional.
3035   Label skip;
3036   if (cond != cc_always) {
3037     Branch(&skip, NegateCondition(cond), r1, r2);
3038   }
3039 
3040   Drop(drop);
3041   Ret();
3042 
3043   if (cond != cc_always) {
3044     bind(&skip);
3045   }
3046 }
3047 
3048 
Drop(int count,Condition cond,Register reg,const Operand & op)3049 void MacroAssembler::Drop(int count,
3050                           Condition cond,
3051                           Register reg,
3052                           const Operand& op) {
3053   if (count <= 0) {
3054     return;
3055   }
3056 
3057   Label skip;
3058 
3059   if (cond != al) {
3060     Branch(&skip, NegateCondition(cond), reg, op);
3061   }
3062 
3063   addiu(sp, sp, count * kPointerSize);
3064 
3065   if (cond != al) {
3066     bind(&skip);
3067   }
3068 }
3069 
3070 
3071 
Swap(Register reg1,Register reg2,Register scratch)3072 void MacroAssembler::Swap(Register reg1,
3073                           Register reg2,
3074                           Register scratch) {
3075   if (scratch.is(no_reg)) {
3076     Xor(reg1, reg1, Operand(reg2));
3077     Xor(reg2, reg2, Operand(reg1));
3078     Xor(reg1, reg1, Operand(reg2));
3079   } else {
3080     mov(scratch, reg1);
3081     mov(reg1, reg2);
3082     mov(reg2, scratch);
3083   }
3084 }
3085 
3086 
Call(Label * target)3087 void MacroAssembler::Call(Label* target) {
3088   BranchAndLink(target);
3089 }
3090 
3091 
Push(Handle<Object> handle)3092 void MacroAssembler::Push(Handle<Object> handle) {
3093   li(at, Operand(handle));
3094   push(at);
3095 }
3096 
3097 
DebugBreak()3098 void MacroAssembler::DebugBreak() {
3099   PrepareCEntryArgs(0);
3100   PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3101   CEntryStub ces(isolate(), 1);
3102   DCHECK(AllowThisStubCall(&ces));
3103   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3104 }
3105 
3106 
3107 // ---------------------------------------------------------------------------
3108 // Exception handling.
3109 
PushTryHandler(StackHandler::Kind kind,int handler_index)3110 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3111                                     int handler_index) {
3112   // Adjust this code if not the case.
3113   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3114   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3115   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3116   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3117   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3118   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3119 
3120   // For the JSEntry handler, we must preserve a0-a3 and s0.
3121   // t1-t3 are available. We will build up the handler from the bottom by
3122   // pushing on the stack.
3123   // Set up the code object (t1) and the state (t2) for pushing.
3124   unsigned state =
3125       StackHandler::IndexField::encode(handler_index) |
3126       StackHandler::KindField::encode(kind);
3127   li(t1, Operand(CodeObject()), CONSTANT_SIZE);
3128   li(t2, Operand(state));
3129 
3130   // Push the frame pointer, context, state, and code object.
3131   if (kind == StackHandler::JS_ENTRY) {
3132     DCHECK_EQ(Smi::FromInt(0), 0);
3133     // The second zero_reg indicates no context.
3134     // The first zero_reg is the NULL frame pointer.
3135     // The operands are reversed to match the order of MultiPush/Pop.
3136     Push(zero_reg, zero_reg, t2, t1);
3137   } else {
3138     MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
3139   }
3140 
3141   // Link the current handler as the next handler.
3142   li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3143   lw(t1, MemOperand(t2));
3144   push(t1);
3145   // Set this new handler as the current one.
3146   sw(sp, MemOperand(t2));
3147 }
3148 
3149 
PopTryHandler()3150 void MacroAssembler::PopTryHandler() {
3151   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3152   pop(a1);
3153   Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3154   li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3155   sw(a1, MemOperand(at));
3156 }
3157 
3158 
JumpToHandlerEntry()3159 void MacroAssembler::JumpToHandlerEntry() {
3160   // Compute the handler entry address and jump to it.  The handler table is
3161   // a fixed array of (smi-tagged) code offsets.
3162   // v0 = exception, a1 = code object, a2 = state.
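  // The handler index lives in the upper bits of the state word: it is
  // shifted down by kKindWidth, scaled to a word offset into the handler
  // table, and the smi-tagged code offset found there is untagged and added
  // to the code entry address to form the jump target.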
3163   lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
3164   Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3165   srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
3166   sll(a2, a2, kPointerSizeLog2);
3167   Addu(a2, a3, a2);
3168   lw(a2, MemOperand(a2));  // Smi-tagged offset.
3169   Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
3170   sra(t9, a2, kSmiTagSize);
3171   Addu(t9, t9, a1);
3172   Jump(t9);  // Jump.
3173 }
3174 
3175 
Throw(Register value)3176 void MacroAssembler::Throw(Register value) {
3177   // Adjust this code if not the case.
3178   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3179   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3180   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3181   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3182   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3183   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3184 
3185   // The exception is expected in v0.
3186   Move(v0, value);
3187 
3188   // Drop the stack pointer to the top of the top handler.
3189   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
3190                                    isolate())));
3191   lw(sp, MemOperand(a3));
3192 
3193   // Restore the next handler.
3194   pop(a2);
3195   sw(a2, MemOperand(a3));
3196 
3197   // Get the code object (a1) and state (a2).  Restore the context and frame
3198   // pointer.
3199   MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3200 
3201   // If the handler is a JS frame, restore the context to the frame.
3202   // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
3203   // or cp.
3204   Label done;
3205   Branch(&done, eq, cp, Operand(zero_reg));
3206   sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3207   bind(&done);
3208 
3209   JumpToHandlerEntry();
3210 }
3211 
3212 
ThrowUncatchable(Register value)3213 void MacroAssembler::ThrowUncatchable(Register value) {
3214   // Adjust this code if not the case.
3215   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3216   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3217   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3218   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3219   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3220   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3221 
3222   // The exception is expected in v0.
3223   if (!value.is(v0)) {
3224     mov(v0, value);
3225   }
3226   // Drop the stack pointer to the top of the top stack handler.
3227   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3228   lw(sp, MemOperand(a3));
3229 
3230   // Unwind the handlers until the ENTRY handler is found.
3231   Label fetch_next, check_kind;
3232   jmp(&check_kind);
3233   bind(&fetch_next);
3234   lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
3235 
3236   bind(&check_kind);
3237   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3238   lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
3239   And(a2, a2, Operand(StackHandler::KindField::kMask));
3240   Branch(&fetch_next, ne, a2, Operand(zero_reg));
3241 
3242   // Set the top handler address to next handler past the top ENTRY handler.
3243   pop(a2);
3244   sw(a2, MemOperand(a3));
3245 
3246   // Get the code object (a1) and state (a2).  Clear the context and frame
3247   // pointer (0 was saved in the handler).
3248   MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3249 
3250   JumpToHandlerEntry();
3251 }
3252 
3253 
Allocate(int object_size,Register result,Register scratch1,Register scratch2,Label * gc_required,AllocationFlags flags)3254 void MacroAssembler::Allocate(int object_size,
3255                               Register result,
3256                               Register scratch1,
3257                               Register scratch2,
3258                               Label* gc_required,
3259                               AllocationFlags flags) {
3260   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3261   if (!FLAG_inline_new) {
3262     if (emit_debug_code()) {
3263       // Trash the registers to simulate an allocation failure.
3264       li(result, 0x7091);
3265       li(scratch1, 0x7191);
3266       li(scratch2, 0x7291);
3267     }
3268     jmp(gc_required);
3269     return;
3270   }
3271 
3272   DCHECK(!result.is(scratch1));
3273   DCHECK(!result.is(scratch2));
3274   DCHECK(!scratch1.is(scratch2));
3275   DCHECK(!scratch1.is(t9));
3276   DCHECK(!scratch2.is(t9));
3277   DCHECK(!result.is(t9));
3278 
3279   // Make object size into bytes.
3280   if ((flags & SIZE_IN_WORDS) != 0) {
3281     object_size *= kPointerSize;
3282   }
3283   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3284 
3285   // Check relative positions of allocation top and limit addresses.
3286   // ARM adds additional checks to make sure the ldm instruction can be
3287   // used. MIPS has no ldm instruction, so no additional checks are needed.
3288   ExternalReference allocation_top =
3289       AllocationUtils::GetAllocationTopReference(isolate(), flags);
3290   ExternalReference allocation_limit =
3291       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3292 
3293   intptr_t top   =
3294       reinterpret_cast<intptr_t>(allocation_top.address());
3295   intptr_t limit =
3296       reinterpret_cast<intptr_t>(allocation_limit.address());
3297   DCHECK((limit - top) == kPointerSize);
3298 
3299   // Set up allocation top address and object size registers.
3300   Register topaddr = scratch1;
3301   li(topaddr, Operand(allocation_top));
3302 
3303   // This code stores a temporary value in t9.
3304   if ((flags & RESULT_CONTAINS_TOP) == 0) {
3305     // Load allocation top into result and allocation limit into t9.
3306     lw(result, MemOperand(topaddr));
3307     lw(t9, MemOperand(topaddr, kPointerSize));
3308   } else {
3309     if (emit_debug_code()) {
3310       // Assert that result actually contains top on entry. t9 is used
3311       // immediately below anyway, so using it here does not make register
3312       // contents differ between debug and release builds.
3313       lw(t9, MemOperand(topaddr));
3314       Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3315     }
3316     // Load allocation limit into t9. Result already contains allocation top.
3317     lw(t9, MemOperand(topaddr, limit - top));
3318   }
3319 
3320   if ((flags & DOUBLE_ALIGNMENT) != 0) {
3321     // Align the next allocation. Storing the filler map without checking top is
3322     // safe in new-space because the limit of the heap is aligned there.
3323     DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3324     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3325     And(scratch2, result, Operand(kDoubleAlignmentMask));
3326     Label aligned;
3327     Branch(&aligned, eq, scratch2, Operand(zero_reg));
3328     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3329       Branch(gc_required, Ugreater_equal, result, Operand(t9));
3330     }
3331     li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3332     sw(scratch2, MemOperand(result));
3333     Addu(result, result, Operand(kDoubleSize / 2));
3334     bind(&aligned);
3335   }
3336 
3337   // Calculate new top and bail out if new space is exhausted. Use result
3338   // to calculate the new top.
3339   Addu(scratch2, result, Operand(object_size));
3340   Branch(gc_required, Ugreater, scratch2, Operand(t9));
3341   sw(scratch2, MemOperand(topaddr));
3342 
3343   // Tag object if requested.
3344   if ((flags & TAG_OBJECT) != 0) {
3345     Addu(result, result, Operand(kHeapObjectTag));
3346   }
3347 }
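// A minimal usage sketch (illustrative only; the call site, label and register
// choices below are assumptions, not taken from this file). A fixed-size
// allocation typically requests a tagged result and provides a gc_required
// label as the slow path:
//
//   Label gc_required;
//   Allocate(HeapNumber::kSize, v0, t0, t1, &gc_required, TAG_OBJECT);
//   // ... initialize the object through v0 ...
//   bind(&gc_required);
//   // Fall back to a runtime allocation here.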
3348 
3349 
3350 void MacroAssembler::Allocate(Register object_size,
3351                               Register result,
3352                               Register scratch1,
3353                               Register scratch2,
3354                               Label* gc_required,
3355                               AllocationFlags flags) {
3356   if (!FLAG_inline_new) {
3357     if (emit_debug_code()) {
3358       // Trash the registers to simulate an allocation failure.
3359       li(result, 0x7091);
3360       li(scratch1, 0x7191);
3361       li(scratch2, 0x7291);
3362     }
3363     jmp(gc_required);
3364     return;
3365   }
3366 
3367   DCHECK(!result.is(scratch1));
3368   DCHECK(!result.is(scratch2));
3369   DCHECK(!scratch1.is(scratch2));
3370   DCHECK(!object_size.is(t9));
3371   DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3372 
3373   // Check relative positions of allocation top and limit addresses.
3374   // ARM adds additional checks to make sure the ldm instruction can be
3375   // used. MIPS has no ldm instruction, so no additional checks are needed.
3376   ExternalReference allocation_top =
3377       AllocationUtils::GetAllocationTopReference(isolate(), flags);
3378   ExternalReference allocation_limit =
3379       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3380   intptr_t top   =
3381       reinterpret_cast<intptr_t>(allocation_top.address());
3382   intptr_t limit =
3383       reinterpret_cast<intptr_t>(allocation_limit.address());
3384   DCHECK((limit - top) == kPointerSize);
3385 
3386   // Set up allocation top address and object size registers.
3387   Register topaddr = scratch1;
3388   li(topaddr, Operand(allocation_top));
3389 
3390   // This code stores a temporary value in t9.
3391   if ((flags & RESULT_CONTAINS_TOP) == 0) {
3392     // Load allocation top into result and allocation limit into t9.
3393     lw(result, MemOperand(topaddr));
3394     lw(t9, MemOperand(topaddr, kPointerSize));
3395   } else {
3396     if (emit_debug_code()) {
3397       // Assert that result actually contains top on entry. t9 is used
3398       // immediately below anyway, so using it here does not make register
3399       // contents differ between debug and release builds.
3400       lw(t9, MemOperand(topaddr));
3401       Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3402     }
3403     // Load allocation limit into t9. Result already contains allocation top.
3404     lw(t9, MemOperand(topaddr, limit - top));
3405   }
3406 
3407   if ((flags & DOUBLE_ALIGNMENT) != 0) {
3408     // Align the next allocation. Storing the filler map without checking top is
3409     // safe in new-space because the limit of the heap is aligned there.
3410     DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3411     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3412     And(scratch2, result, Operand(kDoubleAlignmentMask));
3413     Label aligned;
3414     Branch(&aligned, eq, scratch2, Operand(zero_reg));
3415     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3416       Branch(gc_required, Ugreater_equal, result, Operand(t9));
3417     }
3418     li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3419     sw(scratch2, MemOperand(result));
3420     Addu(result, result, Operand(kDoubleSize / 2));
3421     bind(&aligned);
3422   }
3423 
3424   // Calculate new top and bail out if new space is exhausted. Use result
3425   // to calculate the new top. Object size may be in words so a shift is
3426   // required to get the number of bytes.
3427   if ((flags & SIZE_IN_WORDS) != 0) {
3428     sll(scratch2, object_size, kPointerSizeLog2);
3429     Addu(scratch2, result, scratch2);
3430   } else {
3431     Addu(scratch2, result, Operand(object_size));
3432   }
3433   Branch(gc_required, Ugreater, scratch2, Operand(t9));
3434 
3435   // Update allocation top. result temporarily holds the new top.
3436   if (emit_debug_code()) {
3437     And(t9, scratch2, Operand(kObjectAlignmentMask));
3438     Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3439   }
3440   sw(scratch2, MemOperand(topaddr));
3441 
3442   // Tag object if requested.
3443   if ((flags & TAG_OBJECT) != 0) {
3444     Addu(result, result, Operand(kHeapObjectTag));
3445   }
3446 }
3447 
3448 
3449 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3450                                               Register scratch) {
3451   ExternalReference new_space_allocation_top =
3452       ExternalReference::new_space_allocation_top_address(isolate());
3453 
3454   // Make sure the object has no tag before resetting top.
3455   And(object, object, Operand(~kHeapObjectTagMask));
3456 #ifdef DEBUG
3457   // Check that the object being un-allocated is below the current top.
3458   li(scratch, Operand(new_space_allocation_top));
3459   lw(scratch, MemOperand(scratch));
3460   Check(less, kUndoAllocationOfNonAllocatedMemory,
3461       object, Operand(scratch));
3462 #endif
3463   // Write the address of the object to un-allocate as the current top.
3464   li(scratch, Operand(new_space_allocation_top));
3465   sw(object, MemOperand(scratch));
3466 }
3467 
3468 
3469 void MacroAssembler::AllocateTwoByteString(Register result,
3470                                            Register length,
3471                                            Register scratch1,
3472                                            Register scratch2,
3473                                            Register scratch3,
3474                                            Label* gc_required) {
3475   // Calculate the number of bytes needed for the characters in the string while
3476   // observing object alignment.
3477   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3478   sll(scratch1, length, 1);  // Length in bytes, not chars.
3479   addiu(scratch1, scratch1,
3480        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3481   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3482 
3483   // Allocate two-byte string in new space.
3484   Allocate(scratch1,
3485            result,
3486            scratch2,
3487            scratch3,
3488            gc_required,
3489            TAG_OBJECT);
3490 
3491   // Set the map, length and hash field.
3492   InitializeNewString(result,
3493                       length,
3494                       Heap::kStringMapRootIndex,
3495                       scratch1,
3496                       scratch2);
3497 }
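// Worked size computation (illustrative; the concrete constants are assumptions
// consistent with the asserts above, not read from this file): with
// kObjectAlignment == 8 and SeqTwoByteString::kHeaderSize == 16, a string of
// length 5 needs 5 << 1 = 10 character bytes; adding kObjectAlignmentMask (7)
// and the header gives 33, and masking with ~kObjectAlignmentMask rounds the
// allocation size to 32 bytes, i.e. header plus characters rounded up to the
// next object-aligned size.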
3498 
3499 
3500 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3501                                            Register scratch1, Register scratch2,
3502                                            Register scratch3,
3503                                            Label* gc_required) {
3504   // Calculate the number of bytes needed for the characters in the string
3505   // while observing object alignment.
3506   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3507   DCHECK(kCharSize == 1);
3508   addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3509   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3510 
3511   // Allocate one-byte string in new space.
3512   Allocate(scratch1,
3513            result,
3514            scratch2,
3515            scratch3,
3516            gc_required,
3517            TAG_OBJECT);
3518 
3519   // Set the map, length and hash field.
3520   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3521                       scratch1, scratch2);
3522 }
3523 
3524 
3525 void MacroAssembler::AllocateTwoByteConsString(Register result,
3526                                                Register length,
3527                                                Register scratch1,
3528                                                Register scratch2,
3529                                                Label* gc_required) {
3530   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3531            TAG_OBJECT);
3532   InitializeNewString(result,
3533                       length,
3534                       Heap::kConsStringMapRootIndex,
3535                       scratch1,
3536                       scratch2);
3537 }
3538 
3539 
3540 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3541                                                Register scratch1,
3542                                                Register scratch2,
3543                                                Label* gc_required) {
3544   Allocate(ConsString::kSize,
3545            result,
3546            scratch1,
3547            scratch2,
3548            gc_required,
3549            TAG_OBJECT);
3550 
3551   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3552                       scratch1, scratch2);
3553 }
3554 
3555 
3556 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3557                                                  Register length,
3558                                                  Register scratch1,
3559                                                  Register scratch2,
3560                                                  Label* gc_required) {
3561   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3562            TAG_OBJECT);
3563 
3564   InitializeNewString(result,
3565                       length,
3566                       Heap::kSlicedStringMapRootIndex,
3567                       scratch1,
3568                       scratch2);
3569 }
3570 
3571 
3572 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3573                                                  Register length,
3574                                                  Register scratch1,
3575                                                  Register scratch2,
3576                                                  Label* gc_required) {
3577   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3578            TAG_OBJECT);
3579 
3580   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3581                       scratch1, scratch2);
3582 }
3583 
3584 
3585 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3586                                                      Label* not_unique_name) {
3587   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3588   Label succeed;
3589   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3590   Branch(&succeed, eq, at, Operand(zero_reg));
3591   Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3592 
3593   bind(&succeed);
3594 }
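// Classification summary (explanatory, not from the source): an instance type
// is a unique name if it is an internalized string (both kIsNotStringMask and
// kIsNotInternalizedMask bits clear, the first branch above) or if it is
// exactly SYMBOL_TYPE; every other instance type branches to not_unique_name.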
3595 
3596 
3597 // Allocates a heap number or jumps to the label if the young space is full and
3598 // a scavenge is needed.
3599 void MacroAssembler::AllocateHeapNumber(Register result,
3600                                         Register scratch1,
3601                                         Register scratch2,
3602                                         Register heap_number_map,
3603                                         Label* need_gc,
3604                                         TaggingMode tagging_mode,
3605                                         MutableMode mode) {
3606   // Allocate an object in the heap for the heap number and tag it as a heap
3607   // object.
3608   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3609            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3610 
3611   Heap::RootListIndex map_index = mode == MUTABLE
3612       ? Heap::kMutableHeapNumberMapRootIndex
3613       : Heap::kHeapNumberMapRootIndex;
3614   AssertIsRoot(heap_number_map, map_index);
3615 
3616   // Store heap number map in the allocated object.
3617   if (tagging_mode == TAG_RESULT) {
3618     sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3619   } else {
3620     sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3621   }
3622 }
3623 
3624 
3625 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3626                                                  FPURegister value,
3627                                                  Register scratch1,
3628                                                  Register scratch2,
3629                                                  Label* gc_required) {
3630   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3631   AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3632   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3633 }
3634 
3635 
3636 // Copies a fixed number of fields of heap objects from src to dst.
3637 void MacroAssembler::CopyFields(Register dst,
3638                                 Register src,
3639                                 RegList temps,
3640                                 int field_count) {
3641   DCHECK((temps & dst.bit()) == 0);
3642   DCHECK((temps & src.bit()) == 0);
3643   // Primitive implementation using only one temporary register.
3644 
3645   Register tmp = no_reg;
3646   // Find a temp register in temps list.
3647   for (int i = 0; i < kNumRegisters; i++) {
3648     if ((temps & (1 << i)) != 0) {
3649       tmp.code_ = i;
3650       break;
3651     }
3652   }
3653   DCHECK(!tmp.is(no_reg));
3654 
3655   for (int i = 0; i < field_count; i++) {
3656     lw(tmp, FieldMemOperand(src, i * kPointerSize));
3657     sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3658   }
3659 }
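// A minimal usage sketch (illustrative; a1, a0 and t5 are arbitrary example
// registers, not taken from a real call site): copying the first three
// pointer-size fields of a heap object using a single temporary looks like
//
//   CopyFields(a1, a0, t5.bit(), 3);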
3660 
3661 
3662 void MacroAssembler::CopyBytes(Register src,
3663                                Register dst,
3664                                Register length,
3665                                Register scratch) {
3666   Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3667 
3668   // Align src before copying in word size chunks.
3669   Branch(&byte_loop, le, length, Operand(kPointerSize));
3670   bind(&align_loop_1);
3671   And(scratch, src, kPointerSize - 1);
3672   Branch(&word_loop, eq, scratch, Operand(zero_reg));
3673   lbu(scratch, MemOperand(src));
3674   Addu(src, src, 1);
3675   sb(scratch, MemOperand(dst));
3676   Addu(dst, dst, 1);
3677   Subu(length, length, Operand(1));
3678   Branch(&align_loop_1, ne, length, Operand(zero_reg));
3679 
3680   // Copy bytes in word size chunks.
3681   bind(&word_loop);
3682   if (emit_debug_code()) {
3683     And(scratch, src, kPointerSize - 1);
3684     Assert(eq, kExpectingAlignmentForCopyBytes,
3685         scratch, Operand(zero_reg));
3686   }
3687   Branch(&byte_loop, lt, length, Operand(kPointerSize));
3688   lw(scratch, MemOperand(src));
3689   Addu(src, src, kPointerSize);
3690 
3691   // TODO(kalmard) check if this can be optimized to use sw in most cases.
3692   // Can't use unaligned access - copy byte by byte.
3693   if (kArchEndian == kLittle) {
3694     sb(scratch, MemOperand(dst, 0));
3695     srl(scratch, scratch, 8);
3696     sb(scratch, MemOperand(dst, 1));
3697     srl(scratch, scratch, 8);
3698     sb(scratch, MemOperand(dst, 2));
3699     srl(scratch, scratch, 8);
3700     sb(scratch, MemOperand(dst, 3));
3701   } else {
3702     sb(scratch, MemOperand(dst, 3));
3703     srl(scratch, scratch, 8);
3704     sb(scratch, MemOperand(dst, 2));
3705     srl(scratch, scratch, 8);
3706     sb(scratch, MemOperand(dst, 1));
3707     srl(scratch, scratch, 8);
3708     sb(scratch, MemOperand(dst, 0));
3709   }
3710 
3711   Addu(dst, dst, 4);
3712 
3713   Subu(length, length, Operand(kPointerSize));
3714   Branch(&word_loop);
3715 
3716   // Copy the last bytes if any left.
3717   bind(&byte_loop);
3718   Branch(&done, eq, length, Operand(zero_reg));
3719   bind(&byte_loop_1);
3720   lbu(scratch, MemOperand(src));
3721   Addu(src, src, 1);
3722   sb(scratch, MemOperand(dst));
3723   Addu(dst, dst, 1);
3724   Subu(length, length, Operand(1));
3725   Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3726   bind(&done);
3727 }
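// Byte-order example (illustrative, not from the source): on a little-endian
// target, if the word loaded into scratch is 0x44332211, the sb/srl sequence
// above stores 0x11 at dst+0, 0x22 at dst+1, 0x33 at dst+2 and 0x44 at dst+3,
// which matches what an aligned sw would have written; the big-endian branch
// mirrors the destination offsets so both endiannesses produce the same copy.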
3728 
3729 
3730 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3731                                                 Register end_offset,
3732                                                 Register filler) {
3733   Label loop, entry;
3734   Branch(&entry);
3735   bind(&loop);
3736   sw(filler, MemOperand(start_offset));
3737   Addu(start_offset, start_offset, kPointerSize);
3738   bind(&entry);
3739   Branch(&loop, lt, start_offset, Operand(end_offset));
3740 }
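// A minimal usage sketch (illustrative; t1, t2 and t3 are arbitrary registers
// and are assumed to hold untagged addresses inside a freshly allocated
// object): pre-filling a range of fields with undefined keeps the heap
// iterable before the real field values are written.
//
//   LoadRoot(t3, Heap::kUndefinedValueRootIndex);
//   InitializeFieldsWithFiller(t1, t2, t3);  // Fills [t1, t2) with t3.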
3741 
3742 
3743 void MacroAssembler::CheckFastElements(Register map,
3744                                        Register scratch,
3745                                        Label* fail) {
3746   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3747   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3748   STATIC_ASSERT(FAST_ELEMENTS == 2);
3749   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3750   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3751   Branch(fail, hi, scratch,
3752          Operand(Map::kMaximumBitField2FastHoleyElementValue));
3753 }
3754 
3755 
3756 void MacroAssembler::CheckFastObjectElements(Register map,
3757                                              Register scratch,
3758                                              Label* fail) {
3759   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3760   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3761   STATIC_ASSERT(FAST_ELEMENTS == 2);
3762   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3763   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3764   Branch(fail, ls, scratch,
3765          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3766   Branch(fail, hi, scratch,
3767          Operand(Map::kMaximumBitField2FastHoleyElementValue));
3768 }
3769 
3770 
3771 void MacroAssembler::CheckFastSmiElements(Register map,
3772                                           Register scratch,
3773                                           Label* fail) {
3774   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3775   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3776   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3777   Branch(fail, hi, scratch,
3778          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3779 }
3780 
3781 
3782 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3783                                                  Register key_reg,
3784                                                  Register elements_reg,
3785                                                  Register scratch1,
3786                                                  Register scratch2,
3787                                                  Register scratch3,
3788                                                  Label* fail,
3789                                                  int elements_offset) {
3790   Label smi_value, maybe_nan, have_double_value, is_nan, done;
3791   Register mantissa_reg = scratch2;
3792   Register exponent_reg = scratch3;
3793 
3794   // Handle smi values specially.
3795   JumpIfSmi(value_reg, &smi_value);
3796 
3797   // Ensure that the object is a heap number
3798   CheckMap(value_reg,
3799            scratch1,
3800            Heap::kHeapNumberMapRootIndex,
3801            fail,
3802            DONT_DO_SMI_CHECK);
3803 
3804   // Check for NaN: all NaN values have an upper (exponent) word greater
3805   // (signed) than 0x7ff00000; see the bit-pattern sketch after this function.
3806   li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3807   lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3808   Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3809 
3810   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3811 
3812   bind(&have_double_value);
3813   sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3814   Addu(scratch1, scratch1, elements_reg);
3815   sw(mantissa_reg,
3816       FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3817           + kHoleNanLower32Offset));
3818   sw(exponent_reg,
3819       FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3820           + kHoleNanUpper32Offset));
3821   jmp(&done);
3822 
3823   bind(&maybe_nan);
3824   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3825   // it's an Infinity, and the non-NaN code path applies.
3826   Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3827   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3828   Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3829   bind(&is_nan);
3830   // Load canonical NaN for storing into the double array.
3831   LoadRoot(at, Heap::kNanValueRootIndex);
3832   lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3833   lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3834   jmp(&have_double_value);
3835 
3836   bind(&smi_value);
3837   Addu(scratch1, elements_reg,
3838       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3839               elements_offset));
3840   sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3841   Addu(scratch1, scratch1, scratch2);
3842   // scratch1 now holds the effective address of the double element.
3843 
3844   Register untagged_value = elements_reg;
3845   SmiUntag(untagged_value, value_reg);
3846   mtc1(untagged_value, f2);
3847   cvt_d_w(f0, f2);
3848   sdc1(f0, MemOperand(scratch1, 0));
3849   bind(&done);
3850 }
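// Bit-pattern sketch (explanatory, not from the source): the upper 32 bits of
// an IEEE-754 double hold the sign bit, the 11 exponent bits and the top 20
// mantissa bits. 0x7FF00000 is the upper word of +Infinity (exponent all ones,
// mantissa zero), so an upper word signed-greater-or-equal to
// kNaNOrInfinityLowerBoundUpper32 means +Infinity or NaN; the mantissa check
// above separates the two cases, and NaNs are replaced with the canonical NaN
// before being stored into the double array.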
3851 
3852 
3853 void MacroAssembler::CompareMapAndBranch(Register obj,
3854                                          Register scratch,
3855                                          Handle<Map> map,
3856                                          Label* early_success,
3857                                          Condition cond,
3858                                          Label* branch_to) {
3859   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3860   CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3861 }
3862 
3863 
3864 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3865                                          Handle<Map> map,
3866                                          Label* early_success,
3867                                          Condition cond,
3868                                          Label* branch_to) {
3869   Branch(branch_to, cond, obj_map, Operand(map));
3870 }
3871 
3872 
3873 void MacroAssembler::CheckMap(Register obj,
3874                               Register scratch,
3875                               Handle<Map> map,
3876                               Label* fail,
3877                               SmiCheckType smi_check_type) {
3878   if (smi_check_type == DO_SMI_CHECK) {
3879     JumpIfSmi(obj, fail);
3880   }
3881   Label success;
3882   CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3883   bind(&success);
3884 }
3885 
3886 
3887 void MacroAssembler::DispatchMap(Register obj,
3888                                  Register scratch,
3889                                  Handle<Map> map,
3890                                  Handle<Code> success,
3891                                  SmiCheckType smi_check_type) {
3892   Label fail;
3893   if (smi_check_type == DO_SMI_CHECK) {
3894     JumpIfSmi(obj, &fail);
3895   }
3896   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3897   Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3898   bind(&fail);
3899 }
3900 
3901 
3902 void MacroAssembler::CheckMap(Register obj,
3903                               Register scratch,
3904                               Heap::RootListIndex index,
3905                               Label* fail,
3906                               SmiCheckType smi_check_type) {
3907   if (smi_check_type == DO_SMI_CHECK) {
3908     JumpIfSmi(obj, fail);
3909   }
3910   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3911   LoadRoot(at, index);
3912   Branch(fail, ne, scratch, Operand(at));
3913 }
3914 
3915 
3916 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3917   if (IsMipsSoftFloatABI) {
3918     if (kArchEndian == kLittle) {
3919       Move(dst, v0, v1);
3920     } else {
3921       Move(dst, v1, v0);
3922     }
3923   } else {
3924     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
3925   }
3926 }
3927 
3928 
3929 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3930   if (IsMipsSoftFloatABI) {
3931     if (kArchEndian == kLittle) {
3932       Move(dst, a0, a1);
3933     } else {
3934       Move(dst, a1, a0);
3935     }
3936   } else {
3937     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
3938   }
3939 }
3940 
3941 
3942 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3943   if (!IsMipsSoftFloatABI) {
3944     Move(f12, src);
3945   } else {
3946     if (kArchEndian == kLittle) {
3947       Move(a0, a1, src);
3948     } else {
3949       Move(a1, a0, src);
3950     }
3951   }
3952 }
3953 
3954 
3955 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3956   if (!IsMipsSoftFloatABI) {
3957     Move(f0, src);
3958   } else {
3959     if (kArchEndian == kLittle) {
3960       Move(v0, v1, src);
3961     } else {
3962       Move(v1, v0, src);
3963     }
3964   }
3965 }
3966 
3967 
3968 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3969                                           DoubleRegister src2) {
3970   if (!IsMipsSoftFloatABI) {
3971     if (src2.is(f12)) {
3972       DCHECK(!src1.is(f14));
3973       Move(f14, src2);
3974       Move(f12, src1);
3975     } else {
3976       Move(f12, src1);
3977       Move(f14, src2);
3978     }
3979   } else {
3980     if (kArchEndian == kLittle) {
3981       Move(a0, a1, src1);
3982       Move(a2, a3, src2);
3983     } else {
3984       Move(a1, a0, src1);
3985       Move(a3, a2, src2);
3986     }
3987   }
3988 }
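// Note (explanatory, not from the source): the src2.is(f12) special case above
// exists because copying src1 into f12 first would clobber src2 before it
// reaches f14; reversing the two moves in that case keeps both parameter
// values intact.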
3989 
3990 
3991 // -----------------------------------------------------------------------------
3992 // JavaScript invokes.
3993 
3994 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3995                                     const ParameterCount& actual,
3996                                     Handle<Code> code_constant,
3997                                     Register code_reg,
3998                                     Label* done,
3999                                     bool* definitely_mismatches,
4000                                     InvokeFlag flag,
4001                                     const CallWrapper& call_wrapper) {
4002   bool definitely_matches = false;
4003   *definitely_mismatches = false;
4004   Label regular_invoke;
4005 
4006   // Check whether the expected and actual argument counts match. If not,
4007   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
4008   //  a0: actual arguments count
4009   //  a1: function (passed through to callee)
4010   //  a2: expected arguments count
4011 
4012   // The code below is made a lot easier because the calling code already sets
4013   // up actual and expected registers according to the contract if values are
4014   // passed in registers.
4015   DCHECK(actual.is_immediate() || actual.reg().is(a0));
4016   DCHECK(expected.is_immediate() || expected.reg().is(a2));
4017   DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4018 
4019   if (expected.is_immediate()) {
4020     DCHECK(actual.is_immediate());
4021     if (expected.immediate() == actual.immediate()) {
4022       definitely_matches = true;
4023     } else {
4024       li(a0, Operand(actual.immediate()));
4025       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4026       if (expected.immediate() == sentinel) {
4027         // Don't worry about adapting arguments for builtins that
4028         // don't want that done. Skip adaptation code by making it look
4029         // like we have a match between expected and actual number of
4030         // arguments.
4031         definitely_matches = true;
4032       } else {
4033         *definitely_mismatches = true;
4034         li(a2, Operand(expected.immediate()));
4035       }
4036     }
4037   } else if (actual.is_immediate()) {
4038     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4039     li(a0, Operand(actual.immediate()));
4040   } else {
4041     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4042   }
4043 
4044   if (!definitely_matches) {
4045     if (!code_constant.is_null()) {
4046       li(a3, Operand(code_constant));
4047       addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4048     }
4049 
4050     Handle<Code> adaptor =
4051         isolate()->builtins()->ArgumentsAdaptorTrampoline();
4052     if (flag == CALL_FUNCTION) {
4053       call_wrapper.BeforeCall(CallSize(adaptor));
4054       Call(adaptor);
4055       call_wrapper.AfterCall();
4056       if (!*definitely_mismatches) {
4057         Branch(done);
4058       }
4059     } else {
4060       Jump(adaptor, RelocInfo::CODE_TARGET);
4061     }
4062     bind(&regular_invoke);
4063   }
4064 }
4065 
4066 
4067 void MacroAssembler::InvokeCode(Register code,
4068                                 const ParameterCount& expected,
4069                                 const ParameterCount& actual,
4070                                 InvokeFlag flag,
4071                                 const CallWrapper& call_wrapper) {
4072   // You can't call a function without a valid frame.
4073   DCHECK(flag == JUMP_FUNCTION || has_frame());
4074 
4075   Label done;
4076 
4077   bool definitely_mismatches = false;
4078   InvokePrologue(expected, actual, Handle<Code>::null(), code,
4079                  &done, &definitely_mismatches, flag,
4080                  call_wrapper);
4081   if (!definitely_mismatches) {
4082     if (flag == CALL_FUNCTION) {
4083       call_wrapper.BeforeCall(CallSize(code));
4084       Call(code);
4085       call_wrapper.AfterCall();
4086     } else {
4087       DCHECK(flag == JUMP_FUNCTION);
4088       Jump(code);
4089     }
4090     // Continue here if InvokePrologue handled the invocation itself (via the
4091     // arguments adaptor) because the parameter counts mismatched.
4092     bind(&done);
4093   }
4094 }
4095 
4096 
4097 void MacroAssembler::InvokeFunction(Register function,
4098                                     const ParameterCount& actual,
4099                                     InvokeFlag flag,
4100                                     const CallWrapper& call_wrapper) {
4101   // You can't call a function without a valid frame.
4102   DCHECK(flag == JUMP_FUNCTION || has_frame());
4103 
4104   // Contract with called JS functions requires that function is passed in a1.
4105   DCHECK(function.is(a1));
4106   Register expected_reg = a2;
4107   Register code_reg = a3;
4108 
4109   lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4110   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4111   lw(expected_reg,
4112       FieldMemOperand(code_reg,
4113                       SharedFunctionInfo::kFormalParameterCountOffset));
4114   sra(expected_reg, expected_reg, kSmiTagSize);
4115   lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4116 
4117   ParameterCount expected(expected_reg);
4118   InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4119 }
4120 
4121 
4122 void MacroAssembler::InvokeFunction(Register function,
4123                                     const ParameterCount& expected,
4124                                     const ParameterCount& actual,
4125                                     InvokeFlag flag,
4126                                     const CallWrapper& call_wrapper) {
4127   // You can't call a function without a valid frame.
4128   DCHECK(flag == JUMP_FUNCTION || has_frame());
4129 
4130   // Contract with called JS functions requires that function is passed in a1.
4131   DCHECK(function.is(a1));
4132 
4133   // Get the function and setup the context.
4134   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4135 
4136   // We call indirectly through the code field in the function to
4137   // allow recompilation to take effect without changing any of the
4138   // call sites.
4139   lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4140   InvokeCode(a3, expected, actual, flag, call_wrapper);
4141 }
4142 
4143 
4144 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4145                                     const ParameterCount& expected,
4146                                     const ParameterCount& actual,
4147                                     InvokeFlag flag,
4148                                     const CallWrapper& call_wrapper) {
4149   li(a1, function);
4150   InvokeFunction(a1, expected, actual, flag, call_wrapper);
4151 }
4152 
4153 
4154 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4155                                           Register map,
4156                                           Register scratch,
4157                                           Label* fail) {
4158   lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4159   IsInstanceJSObjectType(map, scratch, fail);
4160 }
4161 
4162 
4163 void MacroAssembler::IsInstanceJSObjectType(Register map,
4164                                             Register scratch,
4165                                             Label* fail) {
4166   lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4167   Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4168   Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4169 }
4170 
4171 
4172 void MacroAssembler::IsObjectJSStringType(Register object,
4173                                           Register scratch,
4174                                           Label* fail) {
4175   DCHECK(kNotStringTag != 0);
4176 
4177   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4178   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4179   And(scratch, scratch, Operand(kIsNotStringMask));
4180   Branch(fail, ne, scratch, Operand(zero_reg));
4181 }
4182 
4183 
4184 void MacroAssembler::IsObjectNameType(Register object,
4185                                       Register scratch,
4186                                       Label* fail) {
4187   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4188   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4189   Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4190 }
4191 
4192 
4193 // ---------------------------------------------------------------------------
4194 // Support functions.
4195 
4196 
4197 void MacroAssembler::TryGetFunctionPrototype(Register function,
4198                                              Register result,
4199                                              Register scratch,
4200                                              Label* miss,
4201                                              bool miss_on_bound_function) {
4202   Label non_instance;
4203   if (miss_on_bound_function) {
4204     // Check that the receiver isn't a smi.
4205     JumpIfSmi(function, miss);
4206 
4207     // Check that the function really is a function.  Load map into result reg.
4208     GetObjectType(function, result, scratch);
4209     Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4210 
4211     lw(scratch,
4212        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4213     lw(scratch,
4214        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4215     And(scratch, scratch,
4216         Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
4217     Branch(miss, ne, scratch, Operand(zero_reg));
4218 
4219     // Make sure that the function has an instance prototype.
4220     lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4221     And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4222     Branch(&non_instance, ne, scratch, Operand(zero_reg));
4223   }
4224 
4225   // Get the prototype or initial map from the function.
4226   lw(result,
4227      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4228 
4229   // If the prototype or initial map is the hole, don't return it and
4230   // simply miss the cache instead. This will allow us to allocate a
4231   // prototype object on-demand in the runtime system.
4232   LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4233   Branch(miss, eq, result, Operand(t8));
4234 
4235   // If the function does not have an initial map, we're done.
4236   Label done;
4237   GetObjectType(result, scratch, scratch);
4238   Branch(&done, ne, scratch, Operand(MAP_TYPE));
4239 
4240   // Get the prototype from the initial map.
4241   lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4242 
4243   if (miss_on_bound_function) {
4244     jmp(&done);
4245 
4246     // Non-instance prototype: Fetch prototype from constructor field
4247     // in initial map.
4248     bind(&non_instance);
4249     lw(result, FieldMemOperand(result, Map::kConstructorOffset));
4250   }
4251 
4252   // All done.
4253   bind(&done);
4254 }
4255 
4256 
4257 void MacroAssembler::GetObjectType(Register object,
4258                                    Register map,
4259                                    Register type_reg) {
4260   lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4261   lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4262 }
4263 
4264 
4265 // -----------------------------------------------------------------------------
4266 // Runtime calls.
4267 
4268 void MacroAssembler::CallStub(CodeStub* stub,
4269                               TypeFeedbackId ast_id,
4270                               Condition cond,
4271                               Register r1,
4272                               const Operand& r2,
4273                               BranchDelaySlot bd) {
4274   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
4275   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4276        cond, r1, r2, bd);
4277 }
4278 
4279 
4280 void MacroAssembler::TailCallStub(CodeStub* stub,
4281                                   Condition cond,
4282                                   Register r1,
4283                                   const Operand& r2,
4284                                   BranchDelaySlot bd) {
4285   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4286 }
4287 
4288 
4289 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4290   return ref0.address() - ref1.address();
4291 }
4292 
4293 
4294 void MacroAssembler::CallApiFunctionAndReturn(
4295     Register function_address,
4296     ExternalReference thunk_ref,
4297     int stack_space,
4298     MemOperand return_value_operand,
4299     MemOperand* context_restore_operand) {
4300   ExternalReference next_address =
4301       ExternalReference::handle_scope_next_address(isolate());
4302   const int kNextOffset = 0;
4303   const int kLimitOffset = AddressOffset(
4304       ExternalReference::handle_scope_limit_address(isolate()),
4305       next_address);
4306   const int kLevelOffset = AddressOffset(
4307       ExternalReference::handle_scope_level_address(isolate()),
4308       next_address);
4309 
4310   DCHECK(function_address.is(a1) || function_address.is(a2));
4311 
4312   Label profiler_disabled;
4313   Label end_profiler_check;
4314   li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
4315   lb(t9, MemOperand(t9, 0));
4316   Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4317 
4318   // Additional parameter is the address of the actual callback.
4319   li(t9, Operand(thunk_ref));
4320   jmp(&end_profiler_check);
4321 
4322   bind(&profiler_disabled);
4323   mov(t9, function_address);
4324   bind(&end_profiler_check);
4325 
4326   // Allocate HandleScope in callee-save registers.
4327   li(s3, Operand(next_address));
4328   lw(s0, MemOperand(s3, kNextOffset));
4329   lw(s1, MemOperand(s3, kLimitOffset));
4330   lw(s2, MemOperand(s3, kLevelOffset));
4331   Addu(s2, s2, Operand(1));
4332   sw(s2, MemOperand(s3, kLevelOffset));
4333 
4334   if (FLAG_log_timer_events) {
4335     FrameScope frame(this, StackFrame::MANUAL);
4336     PushSafepointRegisters();
4337     PrepareCallCFunction(1, a0);
4338     li(a0, Operand(ExternalReference::isolate_address(isolate())));
4339     CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
4340     PopSafepointRegisters();
4341   }
4342 
4343   // Native call returns to the DirectCEntry stub which redirects to the
4344   // return address pushed on stack (could have moved after GC).
4345   // DirectCEntry stub itself is generated early and never moves.
4346   DirectCEntryStub stub(isolate());
4347   stub.GenerateCall(this, t9);
4348 
4349   if (FLAG_log_timer_events) {
4350     FrameScope frame(this, StackFrame::MANUAL);
4351     PushSafepointRegisters();
4352     PrepareCallCFunction(1, a0);
4353     li(a0, Operand(ExternalReference::isolate_address(isolate())));
4354     CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
4355     PopSafepointRegisters();
4356   }
4357 
4358   Label promote_scheduled_exception;
4359   Label exception_handled;
4360   Label delete_allocated_handles;
4361   Label leave_exit_frame;
4362   Label return_value_loaded;
4363 
4364   // Load value from ReturnValue.
4365   lw(v0, return_value_operand);
4366   bind(&return_value_loaded);
4367 
4368   // No more valid handles (the result handle was the last one). Restore
4369   // previous handle scope.
4370   sw(s0, MemOperand(s3, kNextOffset));
4371   if (emit_debug_code()) {
4372     lw(a1, MemOperand(s3, kLevelOffset));
4373     Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4374   }
4375   Subu(s2, s2, Operand(1));
4376   sw(s2, MemOperand(s3, kLevelOffset));
4377   lw(at, MemOperand(s3, kLimitOffset));
4378   Branch(&delete_allocated_handles, ne, s1, Operand(at));
4379 
4380   // Check if the function scheduled an exception.
4381   bind(&leave_exit_frame);
4382   LoadRoot(t0, Heap::kTheHoleValueRootIndex);
4383   li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
4384   lw(t1, MemOperand(at));
4385   Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
4386   bind(&exception_handled);
4387 
4388   bool restore_context = context_restore_operand != NULL;
4389   if (restore_context) {
4390     lw(cp, *context_restore_operand);
4391   }
4392   li(s0, Operand(stack_space));
4393   LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4394 
4395   bind(&promote_scheduled_exception);
4396   {
4397     FrameScope frame(this, StackFrame::INTERNAL);
4398     CallExternalReference(
4399         ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4400         0);
4401   }
4402   jmp(&exception_handled);
4403 
4404   // HandleScope limit has changed. Delete allocated extensions.
4405   bind(&delete_allocated_handles);
4406   sw(s1, MemOperand(s3, kLimitOffset));
4407   mov(s0, v0);
4408   mov(a0, v0);
4409   PrepareCallCFunction(1, s1);
4410   li(a0, Operand(ExternalReference::isolate_address(isolate())));
4411   CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4412       1);
4413   mov(v0, s0);
4414   jmp(&leave_exit_frame);
4415 }
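// Bookkeeping summary (explanatory, not from the source): s0, s1 and s2 cache
// the isolate's HandleScope next, limit and level words across the native
// call. The level is incremented before the call and checked against s2 on
// return in debug code; a changed limit means the callee allocated handle
// scope extensions, which delete_handle_scope_extensions reclaims before the
// exit frame is left.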
4416 
4417 
4418 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4419   return has_frame_ || !stub->SometimesSetsUpAFrame();
4420 }
4421 
4422 
4423 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4424   // If the hash field contains an array index, pick it out. The assert checks
4425   // that the constant for the maximum number of digits of an array index
4426   // cached in the hash field and the number of bits reserved for it do not
4427   // conflict.
4428   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4429          (1 << String::kArrayIndexValueBits));
4430   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4431 }
4432 
4433 
4434 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4435                                                FPURegister result,
4436                                                Register scratch1,
4437                                                Register scratch2,
4438                                                Register heap_number_map,
4439                                                Label* not_number,
4440                                                ObjectToDoubleFlags flags) {
4441   Label done;
4442   if ((flags & OBJECT_NOT_SMI) == 0) {
4443     Label not_smi;
4444     JumpIfNotSmi(object, &not_smi);
4445     // Remove smi tag and convert to double.
4446     sra(scratch1, object, kSmiTagSize);
4447     mtc1(scratch1, result);
4448     cvt_d_w(result, result);
4449     Branch(&done);
4450     bind(&not_smi);
4451   }
4452   // Check for heap number and load double value from it.
4453   lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4454   Branch(not_number, ne, scratch1, Operand(heap_number_map));
4455 
4456   if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4457     // If exponent is all ones the number is either a NaN or +/-Infinity.
4458     Register exponent = scratch1;
4459     Register mask_reg = scratch2;
4460     lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4461     li(mask_reg, HeapNumber::kExponentMask);
4462 
4463     And(exponent, exponent, mask_reg);
4464     Branch(not_number, eq, exponent, Operand(mask_reg));
4465   }
4466   ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4467   bind(&done);
4468 }
4469 
4470 
4471 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4472                                             FPURegister value,
4473                                             Register scratch1) {
4474   sra(scratch1, smi, kSmiTagSize);
4475   mtc1(scratch1, value);
4476   cvt_d_w(value, value);
4477 }
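// Worked example (illustrative): on this 32-bit target a smi encodes the
// integer n as n << kSmiTagSize, so the tagged value 0x0000000A (smi 5) is
// arithmetically shifted back to 5 and then converted by mtc1/cvt_d_w into
// the double 5.0 in `value`.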
4478 
4479 
4480 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4481                                              Register left,
4482                                              Register right,
4483                                              Register overflow_dst,
4484                                              Register scratch) {
4485   DCHECK(!dst.is(overflow_dst));
4486   DCHECK(!dst.is(scratch));
4487   DCHECK(!overflow_dst.is(scratch));
4488   DCHECK(!overflow_dst.is(left));
4489   DCHECK(!overflow_dst.is(right));
4490 
4491   if (left.is(right) && dst.is(left)) {
4492     DCHECK(!dst.is(t9));
4493     DCHECK(!scratch.is(t9));
4494     DCHECK(!left.is(t9));
4495     DCHECK(!right.is(t9));
4496     DCHECK(!overflow_dst.is(t9));
4497     mov(t9, right);
4498     right = t9;
4499   }
4500 
4501   if (dst.is(left)) {
4502     mov(scratch, left);  // Preserve left.
4503     addu(dst, left, right);  // Left is overwritten.
4504     xor_(scratch, dst, scratch);  // Original left.
4505     xor_(overflow_dst, dst, right);
4506     and_(overflow_dst, overflow_dst, scratch);
4507   } else if (dst.is(right)) {
4508     mov(scratch, right);  // Preserve right.
4509     addu(dst, left, right);  // Right is overwritten.
4510     xor_(scratch, dst, scratch);  // Original right.
4511     xor_(overflow_dst, dst, left);
4512     and_(overflow_dst, overflow_dst, scratch);
4513   } else {
4514     addu(dst, left, right);
4515     xor_(overflow_dst, dst, left);
4516     xor_(scratch, dst, right);
4517     and_(overflow_dst, scratch, overflow_dst);
4518   }
4519 }
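// Overflow-detection sketch (explanatory, not from the source): for
// dst = left + right, signed overflow occurred exactly when both operands have
// the same sign but the sum's sign differs, which is what
// (dst ^ left) & (dst ^ right) leaves in the sign bit of overflow_dst.
// Worked example with 8-bit values for brevity: 0x70 + 0x70 = 0xE0, and
// (0xE0 ^ 0x70) & (0xE0 ^ 0x70) = 0x90 has its sign bit set, so a negative
// overflow_dst signals overflow to the caller.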
4520 
4521 
4522 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4523                                              Register left,
4524                                              Register right,
4525                                              Register overflow_dst,
4526                                              Register scratch) {
4527   DCHECK(!dst.is(overflow_dst));
4528   DCHECK(!dst.is(scratch));
4529   DCHECK(!overflow_dst.is(scratch));
4530   DCHECK(!overflow_dst.is(left));
4531   DCHECK(!overflow_dst.is(right));
4532   DCHECK(!scratch.is(left));
4533   DCHECK(!scratch.is(right));
4534 
4535   // This happens with some crankshaft code. Since Subu works fine if
4536   // left == right, let's not make that restriction here.
4537   if (left.is(right)) {
4538     mov(dst, zero_reg);
4539     mov(overflow_dst, zero_reg);
4540     return;
4541   }
4542 
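  // Signed subtraction overflows only when the operands have different signs
  // and the result's sign differs from left's, i.e. exactly when the sign bit
  // of ((dst ^ left) & (left ^ right)) is set; each branch below computes that.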
4543   if (dst.is(left)) {
4544     mov(scratch, left);  // Preserve left.
4545     subu(dst, left, right);  // Left is overwritten.
4546     xor_(overflow_dst, dst, scratch);  // scratch is original left.
4547     xor_(scratch, scratch, right);  // scratch is original left.
4548     and_(overflow_dst, scratch, overflow_dst);
4549   } else if (dst.is(right)) {
4550     mov(scratch, right);  // Preserve right.
4551     subu(dst, left, right);  // Right is overwritten.
4552     xor_(overflow_dst, dst, left);
4553     xor_(scratch, left, scratch);  // Original right.
4554     and_(overflow_dst, scratch, overflow_dst);
4555   } else {
4556     subu(dst, left, right);
4557     xor_(overflow_dst, dst, left);
4558     xor_(scratch, left, right);
4559     and_(overflow_dst, scratch, overflow_dst);
4560   }
4561 }
4562 
4563 
4564 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4565                                  int num_arguments,
4566                                  SaveFPRegsMode save_doubles) {
4567   // All parameters are on the stack. v0 has the return value after call.
4568 
4569   // If the expected number of arguments of the runtime function is
4570   // constant, we check that the actual number of arguments match the
4571   // expectation.
4572   CHECK(f->nargs < 0 || f->nargs == num_arguments);
4573 
4574   // TODO(1236192): Most runtime routines don't need the number of
4575   // arguments passed in because it is constant. At some point we
4576   // should remove this need and make the runtime routine entry code
4577   // smarter.
4578   PrepareCEntryArgs(num_arguments);
4579   PrepareCEntryFunction(ExternalReference(f, isolate()));
4580   CEntryStub stub(isolate(), 1, save_doubles);
4581   CallStub(&stub);
4582 }
4583 
4584 
4585 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4586                                            int num_arguments,
4587                                            BranchDelaySlot bd) {
4588   PrepareCEntryArgs(num_arguments);
4589   PrepareCEntryFunction(ext);
4590 
4591   CEntryStub stub(isolate(), 1);
4592   CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4593 }
4594 
4595 
4596 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4597                                                int num_arguments,
4598                                                int result_size) {
4599   // TODO(1236192): Most runtime routines don't need the number of
4600   // arguments passed in because it is constant. At some point we
4601   // should remove this need and make the runtime routine entry code
4602   // smarter.
4603   PrepareCEntryArgs(num_arguments);
4604   JumpToExternalReference(ext);
4605 }
4606 
4607 
4608 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4609                                      int num_arguments,
4610                                      int result_size) {
4611   TailCallExternalReference(ExternalReference(fid, isolate()),
4612                             num_arguments,
4613                             result_size);
4614 }
4615 
4616 
4617 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4618                                              BranchDelaySlot bd) {
4619   PrepareCEntryFunction(builtin);
4620   CEntryStub stub(isolate(), 1);
4621   Jump(stub.GetCode(),
4622        RelocInfo::CODE_TARGET,
4623        al,
4624        zero_reg,
4625        Operand(zero_reg),
4626        bd);
4627 }
4628 
4629 
4630 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4631                                    InvokeFlag flag,
4632                                    const CallWrapper& call_wrapper) {
4633   // You can't call a builtin without a valid frame.
4634   DCHECK(flag == JUMP_FUNCTION || has_frame());
4635 
4636   GetBuiltinEntry(t9, id);
4637   if (flag == CALL_FUNCTION) {
4638     call_wrapper.BeforeCall(CallSize(t9));
4639     Call(t9);
4640     call_wrapper.AfterCall();
4641   } else {
4642     DCHECK(flag == JUMP_FUNCTION);
4643     Jump(t9);
4644   }
4645 }
4646 
4647 
4648 void MacroAssembler::GetBuiltinFunction(Register target,
4649                                         Builtins::JavaScript id) {
4650   // Load the builtins object into target register.
4651   lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4652   lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4653   // Load the JavaScript builtin function from the builtins object.
4654   lw(target, FieldMemOperand(target,
4655                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4656 }
4657 
4658 
4659 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4660   DCHECK(!target.is(a1));
4661   GetBuiltinFunction(a1, id);
4662   // Load the code entry point from the builtins object.
4663   lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4664 }
4665 
4666 
4667 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4668                                 Register scratch1, Register scratch2) {
4669   if (FLAG_native_code_counters && counter->Enabled()) {
4670     li(scratch1, Operand(value));
4671     li(scratch2, Operand(ExternalReference(counter)));
4672     sw(scratch1, MemOperand(scratch2));
4673   }
4674 }
4675 
4676 
4677 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4678                                       Register scratch1, Register scratch2) {
4679   DCHECK(value > 0);
4680   if (FLAG_native_code_counters && counter->Enabled()) {
4681     li(scratch2, Operand(ExternalReference(counter)));
4682     lw(scratch1, MemOperand(scratch2));
4683     Addu(scratch1, scratch1, Operand(value));
4684     sw(scratch1, MemOperand(scratch2));
4685   }
4686 }
4687 
4688 
4689 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4690                                       Register scratch1, Register scratch2) {
4691   DCHECK(value > 0);
4692   if (FLAG_native_code_counters && counter->Enabled()) {
4693     li(scratch2, Operand(ExternalReference(counter)));
4694     lw(scratch1, MemOperand(scratch2));
4695     Subu(scratch1, scratch1, Operand(value));
4696     sw(scratch1, MemOperand(scratch2));
4697   }
4698 }
4699 
4700 
4701 // -----------------------------------------------------------------------------
4702 // Debugging.
4703 
4704 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4705                             Register rs, Operand rt) {
4706   if (emit_debug_code())
4707     Check(cc, reason, rs, rt);
4708 }
4709 
4710 
4711 void MacroAssembler::AssertFastElements(Register elements) {
4712   if (emit_debug_code()) {
4713     DCHECK(!elements.is(at));
4714     Label ok;
4715     push(elements);
4716     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4717     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4718     Branch(&ok, eq, elements, Operand(at));
4719     LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4720     Branch(&ok, eq, elements, Operand(at));
4721     LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4722     Branch(&ok, eq, elements, Operand(at));
4723     Abort(kJSObjectWithFastElementsMapHasSlowElements);
4724     bind(&ok);
4725     pop(elements);
4726   }
4727 }
4728 
4729 
4730 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4731                            Register rs, Operand rt) {
4732   Label L;
4733   Branch(&L, cc, rs, rt);
4734   Abort(reason);
4735   // Will not return here.
4736   bind(&L);
4737 }
4738 
4739 
4740 void MacroAssembler::Abort(BailoutReason reason) {
4741   Label abort_start;
4742   bind(&abort_start);
4743 #ifdef DEBUG
4744   const char* msg = GetBailoutReason(reason);
4745   if (msg != NULL) {
4746     RecordComment("Abort message: ");
4747     RecordComment(msg);
4748   }
4749 
4750   if (FLAG_trap_on_abort) {
4751     stop(msg);
4752     return;
4753   }
4754 #endif
4755 
4756   li(a0, Operand(Smi::FromInt(reason)));
4757   push(a0);
4758   // Disable stub call restrictions to always allow calls to abort.
4759   if (!has_frame_) {
4760     // We don't actually want to generate a pile of code for this, so just
4761     // claim there is a stack frame, without generating one.
4762     FrameScope scope(this, StackFrame::NONE);
4763     CallRuntime(Runtime::kAbort, 1);
4764   } else {
4765     CallRuntime(Runtime::kAbort, 1);
4766   }
4767   // Will not return here.
4768   if (is_trampoline_pool_blocked()) {
4769     // If the calling code cares about the exact number of
4770     // instructions generated, we insert padding here to keep the size
4771     // of the Abort macro constant.
4772     // Currently in debug mode with debug_code enabled the number of
4773     // generated instructions is 10, so we use this as a maximum value.
4774     static const int kExpectedAbortInstructions = 10;
4775     int abort_instructions = InstructionsGeneratedSince(&abort_start);
4776     DCHECK(abort_instructions <= kExpectedAbortInstructions);
4777     while (abort_instructions++ < kExpectedAbortInstructions) {
4778       nop();
4779     }
4780   }
4781 }
4782 
4783 
4784 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4785   if (context_chain_length > 0) {
4786     // Move up the chain of contexts to the context containing the slot.
4787     lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4788     for (int i = 1; i < context_chain_length; i++) {
4789       lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4790     }
4791   } else {
4792     // Slot is in the current function context.  Move it into the
4793     // destination register in case we store into it (the write barrier
4794     // cannot be allowed to destroy the context in esi).
4795     Move(dst, cp);
4796   }
4797 }
4798 
4799 
4800 void MacroAssembler::LoadTransitionedArrayMapConditional(
4801     ElementsKind expected_kind,
4802     ElementsKind transitioned_kind,
4803     Register map_in_out,
4804     Register scratch,
4805     Label* no_map_match) {
4806   // Load the global or builtins object from the current context.
4807   lw(scratch,
4808      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4809   lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4810 
4811   // Check that the function's map is the same as the expected cached map.
4812   lw(scratch,
4813      MemOperand(scratch,
4814                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4815   size_t offset = expected_kind * kPointerSize +
4816       FixedArrayBase::kHeaderSize;
4817   lw(at, FieldMemOperand(scratch, offset));
4818   Branch(no_map_match, ne, map_in_out, Operand(at));
4819 
4820   // Use the transitioned cached map.
4821   offset = transitioned_kind * kPointerSize +
4822       FixedArrayBase::kHeaderSize;
4823   lw(map_in_out, FieldMemOperand(scratch, offset));
4824 }
4825 
4826 
4827 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4828   // Load the global or builtins object from the current context.
4829   lw(function,
4830      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4831   // Load the native context from the global or builtins object.
4832   lw(function, FieldMemOperand(function,
4833                                GlobalObject::kNativeContextOffset));
4834   // Load the function from the native context.
4835   lw(function, MemOperand(function, Context::SlotOffset(index)));
4836 }
4837 
4838 
4839 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4840                                                   Register map,
4841                                                   Register scratch) {
4842   // Load the initial map. The global functions all have initial maps.
4843   lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4844   if (emit_debug_code()) {
4845     Label ok, fail;
4846     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4847     Branch(&ok);
4848     bind(&fail);
4849     Abort(kGlobalFunctionsMustHaveInitialMap);
4850     bind(&ok);
4851   }
4852 }
4853 
4854 
4855 void MacroAssembler::StubPrologue() {
4856   Push(ra, fp, cp);
4857   Push(Smi::FromInt(StackFrame::STUB));
4858   // Adjust FP to point to saved FP.
4859   Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4860 }
4861 
4862 
4863 void MacroAssembler::Prologue(bool code_pre_aging) {
4864   PredictableCodeSizeScope predictable_code_size_scope(
4865       this, kNoCodeAgeSequenceLength);
4866   // The following three instructions must remain together and unmodified
4867   // for code aging to work properly.
4868   if (code_pre_aging) {
4869     // Pre-age the code.
4870     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4871     nop(Assembler::CODE_AGE_MARKER_NOP);
4872     // Load the stub address to t9 and call it,
4873     // GetCodeAgeAndParity() extracts the stub address from this instruction.
4874     li(t9,
4875        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4876        CONSTANT_SIZE);
4877     nop();  // Prevent jalr to jal optimization.
4878     jalr(t9, a0);
4879     nop();  // Branch delay slot nop.
4880     nop();  // Pad the empty space.
4881   } else {
4882     Push(ra, fp, cp, a1);
4883     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4884     // Adjust fp to point to caller's fp.
4885     Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4886   }
4887 }
4888 
4889 
4890 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4891   addiu(sp, sp, -5 * kPointerSize);
4892   li(t8, Operand(Smi::FromInt(type)));
4893   li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4894   sw(ra, MemOperand(sp, 4 * kPointerSize));
4895   sw(fp, MemOperand(sp, 3 * kPointerSize));
4896   sw(cp, MemOperand(sp, 2 * kPointerSize));
4897   sw(t8, MemOperand(sp, 1 * kPointerSize));
4898   sw(t9, MemOperand(sp, 0 * kPointerSize));
4899   // Adjust FP to point to saved FP.
4900   Addu(fp, sp,
4901        Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4902 }
4903 
4904 
4905 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
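  // EnterFrame leaves fp pointing at the saved fp slot, so after sp = fp the
  // caller's fp and ra sit at offsets 0 and kPointerSize from sp.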
4906   mov(sp, fp);
4907   lw(fp, MemOperand(sp, 0 * kPointerSize));
4908   lw(ra, MemOperand(sp, 1 * kPointerSize));
4909   addiu(sp, sp, 2 * kPointerSize);
4910 }
4911 
4912 
4913 void MacroAssembler::EnterExitFrame(bool save_doubles,
4914                                     int stack_space) {
4915   // Set up the frame structure on the stack.
4916   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4917   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4918   STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4919 
4920   // This is how the stack will look:
4921   // fp + 2 (==kCallerSPDisplacement) - old stack's end
4922   // [fp + 1 (==kCallerPCOffset)] - saved old ra
4923   // [fp + 0 (==kCallerFPOffset)] - saved old fp
4924   // [fp - 1 (==kSPOffset)] - sp of the called function
4925   // [fp - 2 (==kCodeOffset)] - CodeObject
4926   // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4927   //   new stack (will contain saved ra)
4928 
4929   // Save registers.
4930   addiu(sp, sp, -4 * kPointerSize);
4931   sw(ra, MemOperand(sp, 3 * kPointerSize));
4932   sw(fp, MemOperand(sp, 2 * kPointerSize));
4933   addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
4934 
4935   if (emit_debug_code()) {
4936     sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4937   }
4938 
4939   // Accessed from ExitFrame::code_slot.
4940   li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4941   sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4942 
4943   // Save the frame pointer and the context in top.
4944   li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4945   sw(fp, MemOperand(t8));
4946   li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4947   sw(cp, MemOperand(t8));
4948 
4949   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4950   if (save_doubles) {
4951     // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4952     DCHECK(kDoubleSize == frame_alignment);
4953     if (frame_alignment > 0) {
4954       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4955       And(sp, sp, Operand(-frame_alignment));  // Align stack.
4956     }
4957     int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4958     Subu(sp, sp, Operand(space));
4959     // Remember: we only need to save every 2nd double FPU value.
4960     for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4961       FPURegister reg = FPURegister::from_code(i);
4962       sdc1(reg, MemOperand(sp, i * kDoubleSize));
4963     }
4964   }
4965 
4966   // Reserve place for the return address, stack space and an optional slot
4967   // (used by the DirectCEntryStub to hold the return value if a struct is
4968   // returned) and align the frame preparing for calling the runtime function.
4969   DCHECK(stack_space >= 0);
4970   Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4971   if (frame_alignment > 0) {
4972     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4973     And(sp, sp, Operand(-frame_alignment));  // Align stack.
4974   }
4975 
4976   // Set the exit frame sp value to point just before the return address
4977   // location.
4978   addiu(at, sp, kPointerSize);
4979   sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4980 }
4981 
4982 
4983 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4984                                     Register argument_count,
4985                                     bool restore_context,
4986                                     bool do_return) {
4987   // Optionally restore all double registers.
4988   if (save_doubles) {
4989     // Remember: we only need to restore every 2nd double FPU value.
4990     lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4991     for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4992       FPURegister reg = FPURegister::from_code(i);
4993       ldc1(reg, MemOperand(t8, i  * kDoubleSize + kPointerSize));
4994     }
4995   }
4996 
4997   // Clear top frame.
4998   li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4999   sw(zero_reg, MemOperand(t8));
5000 
5001   // Restore current context from top and clear it in debug mode.
5002   if (restore_context) {
5003     li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5004     lw(cp, MemOperand(t8));
5005   }
5006 #ifdef DEBUG
5007   li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5008   sw(a3, MemOperand(t8));
5009 #endif
5010 
5011   // Pop the arguments, restore registers, and return.
5012   mov(sp, fp);  // Respect ABI stack constraint.
5013   lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5014   lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5015 
5016   if (argument_count.is_valid()) {
5017     sll(t8, argument_count, kPointerSizeLog2);
5018     addu(sp, sp, t8);
5019   }
5020 
5021   if (do_return) {
5022     Ret(USE_DELAY_SLOT);
5023     // If returning, the instruction in the delay slot will be the addiu below.
5024   }
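  // Pop the two remaining saved slots (caller fp and ra): 8 == 2 * kPointerSize.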
5025   addiu(sp, sp, 8);
5026 }
5027 
5028 
5029 void MacroAssembler::InitializeNewString(Register string,
5030                                          Register length,
5031                                          Heap::RootListIndex map_index,
5032                                          Register scratch1,
5033                                          Register scratch2) {
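  // Store the length (as a Smi), the map, and an empty hash field into the
  // freshly allocated string.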
5034   sll(scratch1, length, kSmiTagSize);
5035   LoadRoot(scratch2, map_index);
5036   sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
5037   li(scratch1, Operand(String::kEmptyHashField));
5038   sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5039   sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5040 }
5041 
5042 
5043 int MacroAssembler::ActivationFrameAlignment() {
5044 #if V8_HOST_ARCH_MIPS
5045   // Running on the real platform. Use the alignment as mandated by the local
5046   // environment.
5047   // Note: This will break if we ever start generating snapshots on one Mips
5048   // platform for another Mips platform with a different alignment.
5049   return base::OS::ActivationFrameAlignment();
5050 #else  // V8_HOST_ARCH_MIPS
5051   // If we are using the simulator then we should always align to the expected
5052   // alignment. As the simulator is used to generate snapshots we do not know
5053   // if the target platform will need alignment, so this is controlled from a
5054   // flag.
5055   return FLAG_sim_stack_alignment;
5056 #endif  // V8_HOST_ARCH_MIPS
5057 }
5058 
5059 
5060 void MacroAssembler::AssertStackIsAligned() {
5061   if (emit_debug_code()) {
5062     const int frame_alignment = ActivationFrameAlignment();
5063     const int frame_alignment_mask = frame_alignment - 1;
5064 
5065     if (frame_alignment > kPointerSize) {
5066       Label alignment_as_expected;
5067       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5068       andi(at, sp, frame_alignment_mask);
5069       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5070       // Don't use Check here; it calls Runtime_Abort, possibly re-entering here.
5071       stop("Unexpected stack alignment");
5072       bind(&alignment_as_expected);
5073     }
5074   }
5075 }
5076 
5077 
5078 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5079     Register reg,
5080     Register scratch,
5081     Label* not_power_of_two_or_zero) {
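  // A power of two shares no set bits with (itself - 1); reg == 0 is caught
  // because reg - 1 is then negative and takes the signed 'lt' branch.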
5082   Subu(scratch, reg, Operand(1));
5083   Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5084          scratch, Operand(zero_reg));
5085   and_(at, scratch, reg);  // In the delay slot.
5086   Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5087 }
5088 
5089 
5090 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5091   DCHECK(!reg.is(overflow));
5092   mov(overflow, reg);  // Save original value.
5093   SmiTag(reg);
5094   xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
5095 }
5096 
5097 
5098 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5099                                          Register src,
5100                                          Register overflow) {
5101   if (dst.is(src)) {
5102     // Fall back to slower case.
5103     SmiTagCheckOverflow(dst, overflow);
5104   } else {
5105     DCHECK(!dst.is(src));
5106     DCHECK(!dst.is(overflow));
5107     DCHECK(!src.is(overflow));
5108     SmiTag(dst, src);
5109     xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
5110   }
5111 }
5112 
5113 
5114 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5115                                        Register src,
5116                                        Label* smi_case) {
5117   JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5118   SmiUntag(dst, src);
5119 }
5120 
5121 
5122 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5123                                           Register src,
5124                                           Label* non_smi_case) {
5125   JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5126   SmiUntag(dst, src);
5127 }
5128 
5129 void MacroAssembler::JumpIfSmi(Register value,
5130                                Label* smi_label,
5131                                Register scratch,
5132                                BranchDelaySlot bd) {
5133   DCHECK_EQ(0, kSmiTag);
5134   andi(scratch, value, kSmiTagMask);
5135   Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5136 }
5137 
5138 void MacroAssembler::JumpIfNotSmi(Register value,
5139                                   Label* not_smi_label,
5140                                   Register scratch,
5141                                   BranchDelaySlot bd) {
5142   DCHECK_EQ(0, kSmiTag);
5143   andi(scratch, value, kSmiTagMask);
5144   Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5145 }
5146 
5147 
5148 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5149                                       Register reg2,
5150                                       Label* on_not_both_smi) {
5151   STATIC_ASSERT(kSmiTag == 0);
5152   DCHECK_EQ(1, kSmiTagMask);
5153   or_(at, reg1, reg2);
5154   JumpIfNotSmi(at, on_not_both_smi);
5155 }
5156 
5157 
5158 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5159                                      Register reg2,
5160                                      Label* on_either_smi) {
5161   STATIC_ASSERT(kSmiTag == 0);
5162   DCHECK_EQ(1, kSmiTagMask);
5163   // If either is a Smi, its tag bit is 0, so the AND below is also a Smi.
5164   and_(at, reg1, reg2);
5165   JumpIfSmi(at, on_either_smi);
5166 }
5167 
5168 
5169 void MacroAssembler::AssertNotSmi(Register object) {
5170   if (emit_debug_code()) {
5171     STATIC_ASSERT(kSmiTag == 0);
5172     andi(at, object, kSmiTagMask);
5173     Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5174   }
5175 }
5176 
5177 
5178 void MacroAssembler::AssertSmi(Register object) {
5179   if (emit_debug_code()) {
5180     STATIC_ASSERT(kSmiTag == 0);
5181     andi(at, object, kSmiTagMask);
5182     Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5183   }
5184 }
5185 
5186 
5187 void MacroAssembler::AssertString(Register object) {
5188   if (emit_debug_code()) {
5189     STATIC_ASSERT(kSmiTag == 0);
5190     SmiTst(object, t0);
5191     Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
5192     push(object);
5193     lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5194     lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5195     Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5196     pop(object);
5197   }
5198 }
5199 
5200 
5201 void MacroAssembler::AssertName(Register object) {
5202   if (emit_debug_code()) {
5203     STATIC_ASSERT(kSmiTag == 0);
5204     SmiTst(object, t0);
5205     Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
5206     push(object);
5207     lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5208     lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5209     Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5210     pop(object);
5211   }
5212 }
5213 
5214 
5215 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5216                                                      Register scratch) {
5217   if (emit_debug_code()) {
5218     Label done_checking;
5219     AssertNotSmi(object);
5220     LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5221     Branch(&done_checking, eq, object, Operand(scratch));
5222     push(object);
5223     lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5224     LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5225     Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5226     pop(object);
5227     bind(&done_checking);
5228   }
5229 }
5230 
5231 
5232 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5233   if (emit_debug_code()) {
5234     DCHECK(!reg.is(at));
5235     LoadRoot(at, index);
5236     Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5237   }
5238 }
5239 
5240 
5241 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5242                                          Register heap_number_map,
5243                                          Register scratch,
5244                                          Label* on_not_heap_number) {
5245   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5246   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5247   Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5248 }
5249 
5250 
5251 void MacroAssembler::LookupNumberStringCache(Register object,
5252                                              Register result,
5253                                              Register scratch1,
5254                                              Register scratch2,
5255                                              Register scratch3,
5256                                              Label* not_found) {
5257   // Use of registers. Register result is used as a temporary.
5258   Register number_string_cache = result;
5259   Register mask = scratch3;
5260 
5261   // Load the number string cache.
5262   LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5263 
5264   // Make the hash mask from the length of the number string cache. It
5265   // contains two elements (number and string) for each cache entry.
5266   lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5267   // Divide length by two (length is a smi).
5268   sra(mask, mask, kSmiTagSize + 1);
5269   Addu(mask, mask, -1);  // Make mask.
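  // The cache stores two words per entry, so mask is now
  // (number of entries - 1), which the code below uses as an AND mask.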
5270 
5271   // Calculate the entry in the number string cache. The hash value in the
5272   // number string cache for smis is just the smi value, and the hash for
5273   // doubles is the xor of the upper and lower words. See
5274   // Heap::GetNumberStringCache.
5275   Label is_smi;
5276   Label load_result_from_cache;
5277   JumpIfSmi(object, &is_smi);
5278   CheckMap(object,
5279            scratch1,
5280            Heap::kHeapNumberMapRootIndex,
5281            not_found,
5282            DONT_DO_SMI_CHECK);
5283 
5284   STATIC_ASSERT(8 == kDoubleSize);
5285   Addu(scratch1,
5286        object,
5287        Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5288   lw(scratch2, MemOperand(scratch1, kPointerSize));
5289   lw(scratch1, MemOperand(scratch1, 0));
5290   Xor(scratch1, scratch1, Operand(scratch2));
5291   And(scratch1, scratch1, Operand(mask));
5292 
5293   // Calculate address of entry in string cache: each entry consists
5294   // of two pointer sized fields.
5295   sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5296   Addu(scratch1, number_string_cache, scratch1);
5297 
5298   Register probe = mask;
5299   lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5300   JumpIfSmi(probe, not_found);
5301   ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5302   ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5303   BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5304   Branch(not_found);
5305 
5306   bind(&is_smi);
5307   Register scratch = scratch1;
5308   sra(scratch, object, 1);   // Shift away the tag.
5309   And(scratch, mask, Operand(scratch));
5310 
5311   // Calculate address of entry in string cache: each entry consists
5312   // of two pointer sized fields.
5313   sll(scratch, scratch, kPointerSizeLog2 + 1);
5314   Addu(scratch, number_string_cache, scratch);
5315 
5316   // Check if the entry is the smi we are looking for.
5317   lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5318   Branch(not_found, ne, object, Operand(probe));
5319 
5320   // Get the result from the cache.
5321   bind(&load_result_from_cache);
5322   lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5323 
5324   IncrementCounter(isolate()->counters()->number_to_string_native(),
5325                    1,
5326                    scratch1,
5327                    scratch2);
5328 }
5329 
5330 
5331 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5332     Register first, Register second, Register scratch1, Register scratch2,
5333     Label* failure) {
5334   // Test that both first and second are sequential one-byte strings.
5335   // Assume that they are non-smis.
5336   lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5337   lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5338   lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5339   lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5340 
5341   JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5342                                                  scratch2, failure);
5343 }
5344 
5345 
5346 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5347                                                            Register second,
5348                                                            Register scratch1,
5349                                                            Register scratch2,
5350                                                            Label* failure) {
5351   // Check that neither is a smi.
5352   STATIC_ASSERT(kSmiTag == 0);
5353   And(scratch1, first, Operand(second));
5354   JumpIfSmi(scratch1, failure);
5355   JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5356                                                scratch2, failure);
5357 }
5358 
5359 
5360 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5361     Register first, Register second, Register scratch1, Register scratch2,
5362     Label* failure) {
5363   const int kFlatOneByteStringMask =
5364       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5365   const int kFlatOneByteStringTag =
5366       kStringTag | kOneByteStringTag | kSeqStringTag;
5367   DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
5368   andi(scratch1, first, kFlatOneByteStringMask);
5369   Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5370   andi(scratch2, second, kFlatOneByteStringMask);
5371   Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5372 }
5373 
5374 
5375 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5376                                                               Register scratch,
5377                                                               Label* failure) {
5378   const int kFlatOneByteStringMask =
5379       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5380   const int kFlatOneByteStringTag =
5381       kStringTag | kOneByteStringTag | kSeqStringTag;
5382   And(scratch, type, Operand(kFlatOneByteStringMask));
5383   Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5384 }
5385 
5386 
5387 static const int kRegisterPassedArguments = 4;
5388 
5389 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5390                                               int num_double_arguments) {
5391   int stack_passed_words = 0;
5392   num_reg_arguments += 2 * num_double_arguments;
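  // Under the MIPS O32 calling convention each double argument occupies two
  // argument words.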
5393 
5394   // Up to four simple arguments are passed in registers a0..a3.
5395   if (num_reg_arguments > kRegisterPassedArguments) {
5396     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5397   }
5398   stack_passed_words += kCArgSlotCount;
5399   return stack_passed_words;
5400 }
5401 
5402 
5403 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5404                                                Register index,
5405                                                Register value,
5406                                                Register scratch,
5407                                                uint32_t encoding_mask) {
5408   Label is_object;
5409   SmiTst(string, at);
5410   Check(ne, kNonObject, at, Operand(zero_reg));
5411 
5412   lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5413   lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5414 
5415   andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5416   li(scratch, Operand(encoding_mask));
5417   Check(eq, kUnexpectedStringType, at, Operand(scratch));
5418 
5419   // The index is assumed to be untagged coming in, tag it to compare with the
5420   // string length without using a temp register, it is restored at the end of
5421   // this function.
5422   Label index_tag_ok, index_tag_bad;
5423   TrySmiTag(index, scratch, &index_tag_bad);
5424   Branch(&index_tag_ok);
5425   bind(&index_tag_bad);
5426   Abort(kIndexIsTooLarge);
5427   bind(&index_tag_ok);
5428 
5429   lw(at, FieldMemOperand(string, String::kLengthOffset));
5430   Check(lt, kIndexIsTooLarge, index, Operand(at));
5431 
5432   DCHECK(Smi::FromInt(0) == 0);
5433   Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5434 
5435   SmiUntag(index, index);
5436 }
5437 
5438 
5439 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5440                                           int num_double_arguments,
5441                                           Register scratch) {
5442   int frame_alignment = ActivationFrameAlignment();
5443 
5444   // Up to four simple arguments are passed in registers a0..a3.
5445   // Those four arguments must have reserved argument slots on the stack for
5446   // mips, even though those argument slots are not normally used.
5447   // Remaining arguments are pushed on the stack, above (higher address than)
5448   // the argument slots.
5449   int stack_passed_arguments = CalculateStackPassedWords(
5450       num_reg_arguments, num_double_arguments);
5451   if (frame_alignment > kPointerSize) {
5452     // Make stack end at alignment and make room for num_arguments - 4 words
5453     // and the original value of sp.
5454     mov(scratch, sp);
5455     Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5456     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5457     And(sp, sp, Operand(-frame_alignment));
5458     sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5459   } else {
5460     Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5461   }
5462 }
5463 
5464 
5465 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5466                                           Register scratch) {
5467   PrepareCallCFunction(num_reg_arguments, 0, scratch);
5468 }
5469 
5470 
5471 void MacroAssembler::CallCFunction(ExternalReference function,
5472                                    int num_reg_arguments,
5473                                    int num_double_arguments) {
5474   li(t8, Operand(function));
5475   CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5476 }
5477 
5478 
5479 void MacroAssembler::CallCFunction(Register function,
5480                                    int num_reg_arguments,
5481                                    int num_double_arguments) {
5482   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5483 }
5484 
5485 
5486 void MacroAssembler::CallCFunction(ExternalReference function,
5487                                    int num_arguments) {
5488   CallCFunction(function, num_arguments, 0);
5489 }
5490 
5491 
5492 void MacroAssembler::CallCFunction(Register function,
5493                                    int num_arguments) {
5494   CallCFunction(function, num_arguments, 0);
5495 }
5496 
5497 
5498 void MacroAssembler::CallCFunctionHelper(Register function,
5499                                          int num_reg_arguments,
5500                                          int num_double_arguments) {
5501   DCHECK(has_frame());
5502   // Make sure that the stack is aligned before calling a C function unless
5503   // running in the simulator. The simulator has its own alignment check which
5504   // provides more information.
5505   // The argument slots are presumed to have been set up by
5506   // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5507 
5508 #if V8_HOST_ARCH_MIPS
5509   if (emit_debug_code()) {
5510     int frame_alignment = base::OS::ActivationFrameAlignment();
5511     int frame_alignment_mask = frame_alignment - 1;
5512     if (frame_alignment > kPointerSize) {
5513       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5514       Label alignment_as_expected;
5515       And(at, sp, Operand(frame_alignment_mask));
5516       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5517       // Don't use Check here, as it will call Runtime_Abort possibly
5518       // re-entering here.
5519       stop("Unexpected alignment in CallCFunction");
5520       bind(&alignment_as_expected);
5521     }
5522   }
5523 #endif  // V8_HOST_ARCH_MIPS
5524 
5525   // Just call directly. The function called cannot cause a GC, or
5526   // allow preemption, so the return address in the link register
5527   // stays correct.
5528 
5529   if (!function.is(t9)) {
5530     mov(t9, function);
5531     function = t9;
5532   }
5533 
5534   Call(function);
5535 
5536   int stack_passed_arguments = CalculateStackPassedWords(
5537       num_reg_arguments, num_double_arguments);
5538 
5539   if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5540     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5541   } else {
5542     Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5543   }
5544 }
5545 
5546 
5547 #undef BRANCH_ARGS_CHECK
5548 
5549 
5550 void MacroAssembler::PatchRelocatedValue(Register li_location,
5551                                          Register scratch,
5552                                          Register new_value) {
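  // li materializes a relocated 32-bit constant as a lui/ori pair; patch the
  // upper half-word into the lui and the lower half-word into the ori.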
5553   lw(scratch, MemOperand(li_location));
5554   // At this point scratch is a lui(at, ...) instruction.
5555   if (emit_debug_code()) {
5556     And(scratch, scratch, kOpcodeMask);
5557     Check(eq, kTheInstructionToPatchShouldBeALui,
5558         scratch, Operand(LUI));
5559     lw(scratch, MemOperand(li_location));
5560   }
5561   srl(t9, new_value, kImm16Bits);
5562   Ins(scratch, t9, 0, kImm16Bits);
5563   sw(scratch, MemOperand(li_location));
5564 
5565   lw(scratch, MemOperand(li_location, kInstrSize));
5566   // scratch is now ori(at, ...).
5567   if (emit_debug_code()) {
5568     And(scratch, scratch, kOpcodeMask);
5569     Check(eq, kTheInstructionToPatchShouldBeAnOri,
5570         scratch, Operand(ORI));
5571     lw(scratch, MemOperand(li_location, kInstrSize));
5572   }
5573   Ins(scratch, new_value, 0, kImm16Bits);
5574   sw(scratch, MemOperand(li_location, kInstrSize));
5575 
5576   // Update the I-cache so the new lui and ori can be executed.
5577   FlushICache(li_location, 2);
5578 }
5579 
5580 void MacroAssembler::GetRelocatedValue(Register li_location,
5581                                        Register value,
5582                                        Register scratch) {
5583   lw(value, MemOperand(li_location));
5584   if (emit_debug_code()) {
5585     And(value, value, kOpcodeMask);
5586     Check(eq, kTheInstructionShouldBeALui,
5587         value, Operand(LUI));
5588     lw(value, MemOperand(li_location));
5589   }
5590 
5591   // value now holds a lui instruction. Extract the immediate.
5592   sll(value, value, kImm16Bits);
5593 
5594   lw(scratch, MemOperand(li_location, kInstrSize));
5595   if (emit_debug_code()) {
5596     And(scratch, scratch, kOpcodeMask);
5597     Check(eq, kTheInstructionShouldBeAnOri,
5598         scratch, Operand(ORI));
5599     lw(scratch, MemOperand(li_location, kInstrSize));
5600   }
5601   // "scratch" now holds an ori instruction. Extract the immediate.
5602   andi(scratch, scratch, kImm16Mask);
5603 
5604   // Merge the results.
5605   or_(value, value, scratch);
5606 }
5607 
5608 
5609 void MacroAssembler::CheckPageFlag(
5610     Register object,
5611     Register scratch,
5612     int mask,
5613     Condition cc,
5614     Label* condition_met) {
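  // Clearing the low address bits yields the start of the object's page
  // (its MemoryChunk header), whose flags word is then tested against 'mask'.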
5615   And(scratch, object, Operand(~Page::kPageAlignmentMask));
5616   lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5617   And(scratch, scratch, Operand(mask));
5618   Branch(condition_met, cc, scratch, Operand(zero_reg));
5619 }
5620 
5621 
5622 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5623                                         Register scratch,
5624                                         Label* if_deprecated) {
5625   if (map->CanBeDeprecated()) {
5626     li(scratch, Operand(map));
5627     lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5628     And(scratch, scratch, Operand(Map::Deprecated::kMask));
5629     Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5630   }
5631 }
5632 
5633 
5634 void MacroAssembler::JumpIfBlack(Register object,
5635                                  Register scratch0,
5636                                  Register scratch1,
5637                                  Label* on_black) {
5638   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
5639   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5640 }
5641 
5642 
5643 void MacroAssembler::HasColor(Register object,
5644                               Register bitmap_scratch,
5645                               Register mask_scratch,
5646                               Label* has_color,
5647                               int first_bit,
5648                               int second_bit) {
5649   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5650   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5651 
5652   GetMarkBits(object, bitmap_scratch, mask_scratch);
5653 
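  // Each object has two consecutive mark bits; mask_scratch selects the first.
  // The pair can straddle a bitmap cell, handled at word_boundary below.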
5654   Label other_color, word_boundary;
5655   lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5656   And(t8, t9, Operand(mask_scratch));
5657   Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5658   // Shift left 1 by adding.
5659   Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5660   Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5661   And(t8, t9, Operand(mask_scratch));
5662   Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5663   jmp(&other_color);
5664 
5665   bind(&word_boundary);
5666   lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5667   And(t9, t9, Operand(1));
5668   Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5669   bind(&other_color);
5670 }
5671 
5672 
5673 // Detect some, but not all, common pointer-free objects.  This is used by the
5674 // incremental write barrier which doesn't care about oddballs (they are always
5675 // marked black immediately so this code is not hit).
5676 void MacroAssembler::JumpIfDataObject(Register value,
5677                                       Register scratch,
5678                                       Label* not_data_object) {
5679   DCHECK(!AreAliased(value, scratch, t8, no_reg));
5680   Label is_data_object;
5681   lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5682   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5683   Branch(&is_data_object, eq, t8, Operand(scratch));
5684   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5685   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5686   // If it's a string and it's not a cons string then it's an object containing
5687   // no GC pointers.
5688   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5689   And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5690   Branch(not_data_object, ne, t8, Operand(zero_reg));
5691   bind(&is_data_object);
5692 }
5693 
5694 
5695 void MacroAssembler::GetMarkBits(Register addr_reg,
5696                                  Register bitmap_reg,
5697                                  Register mask_reg) {
5698   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
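  // bitmap_reg <- page start + byte offset of the object's mark-bit cell;
  // mask_reg   <- 1 << (the object's bit index within that cell).
  // Both indices come from the address bits above the pointer alignment.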
5699   And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5700   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5701   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5702   Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5703   sll(t8, t8, kPointerSizeLog2);
5704   Addu(bitmap_reg, bitmap_reg, t8);
5705   li(t8, Operand(1));
5706   sllv(mask_reg, t8, mask_reg);
5707 }
5708 
5709 
5710 void MacroAssembler::EnsureNotWhite(
5711     Register value,
5712     Register bitmap_scratch,
5713     Register mask_scratch,
5714     Register load_scratch,
5715     Label* value_is_white_and_not_data) {
5716   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5717   GetMarkBits(value, bitmap_scratch, mask_scratch);
5718 
5719   // If the value is black or grey we don't need to do anything.
5720   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5721   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5722   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5723   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5724 
5725   Label done;
5726 
5727   // Since both black and grey have a 1 in the first position and white does
5728   // not have a 1 there we only need to check one bit.
5729   lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5730   And(t8, mask_scratch, load_scratch);
5731   Branch(&done, ne, t8, Operand(zero_reg));
5732 
5733   if (emit_debug_code()) {
5734     // Check for impossible bit pattern.
5735     Label ok;
5736     // sll may overflow, making the check conservative.
5737     sll(t8, mask_scratch, 1);
5738     And(t8, load_scratch, t8);
5739     Branch(&ok, eq, t8, Operand(zero_reg));
5740     stop("Impossible marking bit pattern");
5741     bind(&ok);
5742   }
5743 
5744   // Value is white.  We check whether it is data that doesn't need scanning.
5745   // Currently only checks for HeapNumber and non-cons strings.
5746   Register map = load_scratch;  // Holds map while checking type.
5747   Register length = load_scratch;  // Holds length of object after testing type.
5748   Label is_data_object;
5749 
5750   // Check for heap-number
5751   lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5752   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5753   {
5754     Label skip;
5755     Branch(&skip, ne, t8, Operand(map));
5756     li(length, HeapNumber::kSize);
5757     Branch(&is_data_object);
5758     bind(&skip);
5759   }
5760 
5761   // Check for strings.
5762   DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5763   DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5764   // If it's a string and it's not a cons string then it's an object containing
5765   // no GC pointers.
5766   Register instance_type = load_scratch;
5767   lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5768   And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5769   Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5770   // It's a non-indirect (non-cons and non-slice) string.
5771   // If it's external, the length is just ExternalString::kSize.
5772   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5773   // External strings are the only ones with the kExternalStringTag bit
5774   // set.
5775   DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5776   DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5777   And(t8, instance_type, Operand(kExternalStringTag));
5778   {
5779     Label skip;
5780     Branch(&skip, eq, t8, Operand(zero_reg));
5781     li(length, ExternalString::kSize);
5782     Branch(&is_data_object);
5783     bind(&skip);
5784   }
5785 
5786   // Sequential string, either Latin1 or UC16.
5787   // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5788   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5789   // getting the length multiplied by 2.
5790   DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5791   DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5792   lw(t9, FieldMemOperand(value, String::kLengthOffset));
5793   And(t8, instance_type, Operand(kStringEncodingMask));
5794   {
5795     Label skip;
5796     Branch(&skip, eq, t8, Operand(zero_reg));
5797     srl(t9, t9, 1);
5798     bind(&skip);
5799   }
  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  And(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

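  // Bump the page's live-bytes counter: mask bitmap_scratch down to the
  // MemoryChunk start and add the object's size.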
  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Addu(t8, t8, Operand(length));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}

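// Extracts the EnumLength bit field from the map's bit_field_3 and returns it
// in |dst| as a smi.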
void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = t2;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
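  // The object to enumerate is expected in a0; a2 walks the prototype chain
  // and a1 holds the current map.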
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  jmp(&start);

  bind(&next);
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}


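// Clamps |input_reg| to the range [0, 255] and writes the result to
// |output_reg|.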
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is <= 0 or NaN; return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is > 0.  Values above 255 are clamped to 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
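  // cvt_w_d converts using the current FCSR rounding mode (round-to-nearest
  // by default), so in-range values are rounded rather than truncated.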
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}


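// Checks whether an AllocationMemento directly follows the JSArray in
// |receiver_reg|.  If the slot just past the array lies outside new space we
// jump to |no_memento_found|; otherwise the candidate memento's map is
// compared against the allocation_memento_map, branching to
// |allocation_memento_present| when |cond| holds.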
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found,
    Condition cond,
    Label* allocation_memento_present) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  Addu(scratch_reg, receiver_reg,
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
  li(at, Operand(new_space_allocation_top));
  lw(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  if (allocation_memento_present) {
    Branch(allocation_memento_present, cond, scratch_reg,
           Operand(isolate()->factory()->allocation_memento_map()));
  }
}


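// Returns the first allocatable register that is not one of the (valid)
// registers passed in.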
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


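// Branches to |found| if any map in |object|'s prototype chain has dictionary
// (slow) elements.  The walk stops when the null value is reached.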
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Start the walk at the object itself; |current| tracks our position in
  // the prototype chain.
  Move(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));
}


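// Returns true if any two of the valid registers passed in alias (i.e. are
// the same register): the number of valid arguments is compared against the
// number of distinct registers in the combined mask.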
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}


CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CpuFeatures::FlushICache(address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  DCHECK(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' conditions are supported, and only for
  // the simple branch instructions (those whose opcode encodes the branch
  // type).  There are some special cases (see Assembler::IsBranch()), so
  // extending this would be tricky.
  DCHECK(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
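  // Rewrite the opcode field in place; the registers and the branch offset
  // encoded in the instruction are left unchanged.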
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
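  // Signed division by a constant via multiplication by a precomputed magic
  // number (cf. Hacker's Delight): take the high 32 bits of
  // dividend * multiplier, correct for the sign of the multiplier/divisor,
  // arithmetic-shift right by mag.shift, and finally add the dividend's sign
  // bit so the quotient is truncated toward zero.  For example, with the
  // standard constants, dividing by 3 uses multiplier 0x55555556 and shift 0.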
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(mag.multiplier));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS