// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      has_double_zero_reg_set_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}

void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else {
    lw(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    sw(src, dst);
  }
}

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}

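// The conditional LoadRoot above relies on a short forward branch:
// Branch(2, NegateCondition(cond), ...) jumps over the following lw when
// `cond` does not hold, so the root is loaded only when the condition is
// satisfied. A usage sketch (register choices are illustrative, not from
// this file):
//
//   // v0 = undefined root, but only if a0 == zero_reg.
//   LoadRoot(v0, Heap::kUndefinedValueRootIndex, eq, a0, Operand(zero_reg));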

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}

void MacroAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Push(ra, fp, marker_reg);
    Addu(fp, sp, Operand(kPointerSize));
  } else {
    Push(ra, fp);
    mov(fp, sp);
  }
}

void MacroAssembler::PopCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Pop(ra, fp, marker_reg);
  } else {
    Pop(ra, fp);
  }
}

void MacroAssembler::PushStandardFrame(Register function_reg) {
  int offset = -StandardFrameConstants::kContextOffset;
  if (function_reg.is_valid()) {
    Push(ra, fp, cp, function_reg);
    offset += kPointerSize;
  } else {
    Push(ra, fp, cp);
  }
  Addu(fp, sp, Operand(offset));
}

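// Stack layout produced by PushStandardFrame(function_reg), as implied by
// the pushes above (word offsets relative to the final fp; sketch only,
// kPointerSize == 4 on mips32):
//
//   fp + 4 : ra (return address)
//   fp + 0 : saved caller fp
//   fp - 4 : cp (context)
//   fp - 8 : function_reg (when valid)   <- sp
//
// The computed `offset` places fp on the saved-caller-fp slot, so the
// context ends up at StandardFrameConstants::kContextOffset from fp.
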
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  DCHECK(cc == eq || cc == ne);
  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
}


// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}


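// A usage sketch for the field write barrier (illustrative only; the
// register choices, the field used, and the reliance on the defaulted
// trailing parameters are assumptions, not taken from this file):
//
//   // Store a heap pointer into an object field, then emit the barrier.
//   sw(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
//   RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
//                    kRAHasNotBeenSaved, kDontSaveFPRegs);
//
// Note that with debug code enabled both `value` and `scratch` are zapped
// afterwards, so neither may be reused without reloading.
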
// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    DCHECK(!dst.is(at));
    lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          dst,
          Operand(isolate()->factory()->meta_map()));
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          map,
          Operand(at));
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    RAStatus ra_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    lw(at, MemOperand(address));
    Assert(
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to update
  // the remembered set. If incremental marking is off, there is nothing for us
  // to do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(a1));
  DCHECK(code_entry.is(t0));
  DCHECK(scratch.is(t1));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
    lw(at, MemOperand(scratch));
    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
           Operand(code_entry));
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  const Register dst = scratch;
  Addu(dst, js_function, Operand(offset - kHeapObjectTag));

  // Save caller-saved registers. js_function and code_entry are in the
  // caller-saved register list.
  DCHECK(kJSCallerSaved & js_function.bit());
  DCHECK(kJSCallerSaved & code_entry.bit());
  MultiPush(kJSCallerSaved | ra.bit());

  int argument_count = 3;

  PrepareCallCFunction(argument_count, 0, code_entry);

  mov(a0, js_function);
  mov(a1, dst);
  li(a2, Operand(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers.
  MultiPop(kJSCallerSaved | ra.bit());

  bind(&done);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  lw(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sw(address, MemOperand(scratch));
  Addu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sw(scratch, MemOperand(t8));
  // Check for end of buffer and call the stub on overflow.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, ne, t8, Operand(zero_reg));
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(ne, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

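// The mask test above exploits the store buffer's power-of-two layout: the
// bits selected by StoreBuffer::kStoreBufferMask become zero exactly when
// the incremented top has reached a buffer boundary. A worked sketch with
// made-up addresses (illustrative only, assuming a 16 KB buffer):
//
//   top           = 0x20003FFC      // last slot before the boundary
//   top + 4       = 0x20004000
//   (top + 4) & kStoreBufferMask == 0  -> buffer full, call the stub
//
// The `ne` branches therefore mean "still room, skip the overflow stub".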

// -----------------------------------------------------------------------------
// Allocation support.


// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(scratch, reg0, zero_reg);
  Lsa(reg0, scratch, reg0, 15);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  Lsa(reg0, reg0, reg0, 2);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  sll(scratch, reg0, 11);
  Lsa(reg0, reg0, reg0, 3);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
  And(reg0, reg0, Operand(0x3fffffff));
}

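// The multiply-by-2057 step above avoids a hardware multiply by using the
// decomposition 2057 = 1 + 8 + 2048, i.e.
//
//   hash * 2057 == hash + (hash << 3) + (hash << 11)
//
// which is exactly what the sll/Lsa/addu triple computes. The final And
// masks the result to 30 bits, so it always fits in a Smi.
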
// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}

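// The pattern above recurs throughout these macros: a register operand maps
// to the R-type instruction, a small immediate to the I-type form, and
// anything else (large immediates, values needing relocation) is first
// materialized into `at`. A sketch of the emitted code (illustrative only):
//
//   Addu(v0, a0, Operand(t1));       // addu  v0, a0, t1
//   Addu(v0, a0, Operand(100));      // addiu v0, a0, 100
//   Addu(v0, a0, Operand(0x12345));  // lui/ori into at, then addu v0, a0, at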

void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (IsMipsArchVariant(kLoongson)) {
      mult(rs, rt.rm());
      mflo(rd);
    } else {
      mul(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (IsMipsArchVariant(kLoongson)) {
      mult(rs, at);
      mflo(rd);
    } else {
      mul(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
    Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, rt.rm());
      mflo(rd_lo);
      mfhi(rd_hi);
    } else {
      if (rd_lo.is(rs)) {
        DCHECK(!rd_hi.is(rs));
        DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
        muh(rd_hi, rs, rt.rm());
        mul(rd_lo, rs, rt.rm());
      } else {
        DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
        mul(rd_lo, rs, rt.rm());
        muh(rd_hi, rs, rt.rm());
      }
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, at);
      mflo(rd_lo);
      mfhi(rd_hi);
    } else {
      if (rd_lo.is(rs)) {
        DCHECK(!rd_hi.is(rs));
        DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
        muh(rd_hi, rs, at);
        mul(rd_lo, rs, at);
      } else {
        DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
        mul(rd_lo, rs, at);
        muh(rd_hi, rs, at);
      }
    }
  }
}

void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
                          const Operand& rt) {
  Register reg;
  if (rt.is_reg()) {
    reg = rt.rm();
  } else {
    DCHECK(!rs.is(at));
    reg = at;
    li(reg, rt);
  }

  if (!IsMipsArchVariant(kMips32r6)) {
    multu(rs, reg);
    mflo(rd_lo);
    mfhi(rd_hi);
  } else {
    if (rd_lo.is(rs)) {
      DCHECK(!rd_hi.is(rs));
      DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
      muhu(rd_hi, rs, reg);
      mulu(rd_lo, rs, reg);
    } else {
      DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
      mulu(rd_lo, rs, reg);
      muhu(rd_hi, rs, reg);
    }
  }
}

void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, at);
      mfhi(rd);
    } else {
      muh(rd, rs, at);
    }
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      multu(rs, at);
      mfhi(rd);
    } else {
      muhu(rd, rs, at);
    }
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Div(Register rem, Register res,
    Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mflo(res);
      mfhi(rem);
    } else {
      div(res, rs, rt.rm());
      mod(rem, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, at);
      mflo(res);
      mfhi(rem);
    } else {
      div(res, rs, at);
      mod(rem, rs, at);
    }
  }
}


void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, at);
      mflo(res);
    } else {
      div(res, rs, at);
    }
  }
}


void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, at);
      mfhi(rd);
    } else {
      mod(rd, rs, at);
    }
  }
}


void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, at);
      mfhi(rd);
    } else {
      modu(rd, rs, at);
    }
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, at);
      mflo(res);
    } else {
      divu(res, rs, at);
    }
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  DCHECK(rt.is_reg());
  DCHECK(!at.is(rs));
  DCHECK(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    const uint32_t int16_min = std::numeric_limits<int16_t>::min();
    if (is_uint15(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      // Imm range is: [0, 32767].
      sltiu(rd, rs, rt.imm32_);
    } else if (is_uint15(rt.imm32_ - int16_min) && !MustUseReg(rt.rmode_)) {
      // Imm range is: [max_unsigned-32767, max_unsigned].
      sltiu(rd, rs, static_cast<uint16_t>(rt.imm32_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}

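// The second sltiu case above relies on sltiu sign-extending its 16-bit
// immediate while still comparing unsigned: immediates in [-32768, -1]
// therefore cover the unsigned range [0xFFFF8000, 0xFFFFFFFF]. A worked
// example (values are illustrative):
//
//   Sltu(v0, a0, Operand(0xFFFF9000));
//   // 0xFFFF9000 - int16_min == 0x1000, which is_uint15 accepts, so this
//   // emits: sltiu v0, a0, 0x9000  (0x9000 sign-extends to 0xFFFF9000)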

void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_ & 0x1f);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_ & 0x1f);
        sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}

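// On cores without rotr/rotrv, the fallback above synthesizes the rotate
// from the identity
//
//   rotr(x, n) == (x >> n) | (x << (32 - n))   for n in [1, 31],
//
// with shift amounts taken mod 32. sllv/srlv only use the low five bits of
// the register operand, which is why subu(at, zero_reg, rt.rm()) works as
// "32 - n" in the register case.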

void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  if (IsMipsArchVariant(kLoongson)) {
    lw(zero_reg, rs);
  } else {
    pref(hint, rs);
  }
}


void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                         Register scratch) {
  DCHECK(sa >= 1 && sa <= 31);
  if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
    lsa(rd, rt, rs, sa - 1);
  } else {
    Register tmp = rd.is(rt) ? scratch : rd;
    DCHECK(!tmp.is(rt));
    sll(tmp, rs, sa);
    Addu(rd, rt, tmp);
  }
}

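// Lsa computes rd = rt + (rs << sa), the classic shift-and-add used for
// scaled indexing. A sketch (register names are illustrative):
//
//   // Address of element `index` in a word array starting at `base`:
//   Lsa(addr, base, index, kPointerSizeLog2);  // addr = base + (index << 2)
//
// On r6 with sa <= 4 this is a single lsa instruction (whose encoding
// stores sa - 1); otherwise it falls back to sll + Addu.
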
void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bnvc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bovc(rs, rt, L);
  }
}

void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bovc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bnvc(rs, rt, L);
  }
}

// ------------Pseudo-instructions-------------

// Word Swap Byte
void MacroAssembler::ByteSwapSigned(Register dest, Register src,
                                    int operand_size) {
  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);

  if (operand_size == 2) {
    Seh(src, src);
  } else if (operand_size == 1) {
    Seb(src, src);
  }
  // No need to do any preparation if operand_size is 4.

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    wsbh(dest, src);
    rotr(dest, dest, 16);
  } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
    Register tmp = t0;
    Register tmp2 = t1;

    andi(tmp2, src, 0xFF);
    sll(tmp2, tmp2, 24);
    or_(tmp, zero_reg, tmp2);

    andi(tmp2, src, 0xFF00);
    sll(tmp2, tmp2, 8);
    or_(tmp, tmp, tmp2);

    srl(src, src, 8);
    andi(tmp2, src, 0xFF00);
    or_(tmp, tmp, tmp2);

    srl(src, src, 16);
    andi(tmp2, src, 0xFF);
    or_(tmp, tmp, tmp2);

    or_(dest, tmp, zero_reg);
  }
}

void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
                                      int operand_size) {
  DCHECK(operand_size == 1 || operand_size == 2);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    if (operand_size == 1) {
      andi(src, src, 0xFF);
    } else {
      andi(src, src, 0xFFFF);
    }

    wsbh(dest, src);
    rotr(dest, dest, 16);
  } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
    if (operand_size == 1) {
      sll(src, src, 24);
    } else {
      Register tmp = t0;

      andi(tmp, src, 0xFF00);
      sll(src, src, 24);
      sll(tmp, tmp, 8);
      or_(dest, tmp, src);
    }
  }
}

void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    lw(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    if (is_int16(rs.offset() + kMipsLwrOffset) &&
        is_int16(rs.offset() + kMipsLwlOffset)) {
      if (!rd.is(rs.rm())) {
        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
      } else {
        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
        mov(rd, at);
      }
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
      lwr(rd, MemOperand(at, kMipsLwrOffset));
      lwl(rd, MemOperand(at, kMipsLwlOffset));
    }
  }
}

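// Ulw implements a word load from a possibly unaligned address. On r6 a
// plain lw suffices because r6 mandates hardware support for unaligned
// accesses; on older variants the lwr/lwl pair assembles the word from the
// bytes on either side of the alignment boundary, with kMipsLwrOffset and
// kMipsLwlOffset hiding the endianness dependence. When rd aliases the base
// register rs.rm(), the pair targets `at` first so the address is not
// clobbered between the two partial loads.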

void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    sw(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    if (is_int16(rs.offset() + kMipsSwrOffset) &&
        is_int16(rs.offset() + kMipsSwlOffset)) {
      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
    } else {
      LoadRegPlusOffsetToAt(rs);
      swr(rd, MemOperand(at, kMipsSwrOffset));
      swl(rd, MemOperand(at, kMipsSwlOffset));
    }
  }
}

void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    lh(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(at, rs);
      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
      lb(rd, rs);
#endif
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lb(rd, MemOperand(at, 1));
      lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lb(rd, MemOperand(at, 0));
      lbu(at, MemOperand(at, 1));
#endif
    }
    sll(rd, rd, 8);
    or_(rd, rd, at);
  }
}

void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    lhu(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(at, rs);
      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
      lbu(rd, rs);
#endif
    } else {  // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(rd, MemOperand(at, 1));
      lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(rd, MemOperand(at, 0));
      lbu(at, MemOperand(at, 1));
#endif
    }
    sll(rd, rd, 8);
    or_(rd, rd, at);
  }
}

void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  DCHECK(!rs.rm().is(scratch));
  DCHECK(!scratch.is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    sh(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    MemOperand source = rs;
    // If offset > 16 bits, load address to at with offset 0.
    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
      LoadRegPlusOffsetToAt(rs);
      source = MemOperand(at, 0);
    }

    if (!scratch.is(rd)) {
      mov(scratch, rd);
    }

#if defined(V8_TARGET_LITTLE_ENDIAN)
    sb(scratch, source);
    srl(scratch, scratch, 8);
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
    srl(scratch, scratch, 8);
    sb(scratch, source);
#endif
  }
}

void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (IsMipsArchVariant(kMips32r6)) {
    lwc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    Ulw(scratch, rs);
    mtc1(scratch, fd);
  }
}

void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (IsMipsArchVariant(kMips32r6)) {
    swc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    mfc1(scratch, fd);
    Usw(scratch, rs);
  }
}

void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(!scratch.is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    ldc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
    mtc1(scratch, fd);
    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
    Mthc1(scratch, fd);
  }
}

void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(!scratch.is(at));
  if (IsMipsArchVariant(kMips32r6)) {
    sdc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    mfc1(scratch, fd);
    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
    Mfhc1(scratch, fd);
    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
  }
}


void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  li(dst, Operand(value), mode);
}


void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    } else {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}

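// li picks the shortest sequence that can materialize the constant when
// OPTIMIZE_SIZE is allowed, but pads to the full two-instruction lui/ori
// form whenever the site may be patched (relocation info) so the patcher
// always has room. A sketch of the emitted sequences (illustrative):
//
//   li(v0, Operand(-5));       // addiu v0, zero_reg, -5
//   li(v0, Operand(0xF234));   // ori   v0, zero_reg, 0xF234  (high half 0)
//   li(v0, Operand(0x50000));  // lui   v0, 0x5               (low half 0)
//   li(v0, Operand(0x12345));  // lui v0, 0x1; ori v0, v0, 0x2345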


void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}

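// MultiPush stores the highest-numbered register closest to the old sp, so
// lower register codes end up at lower addresses. For example (register
// codes are illustrative):
//
//   MultiPush(a0.bit() | a2.bit() | t0.bit());
//   // sp -= 12; memory: [sp+0] = a0, [sp+4] = a2, [sp+8] = t0
//
// MultiPop below walks the list in the opposite order, so a matched
// MultiPush/MultiPop pair with the same RegList restores every register.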

void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}

void MacroAssembler::AddPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high) {
  Label no_overflow;
  Register kScratchReg = s3;
  Register kScratchReg2 = s4;
  // Add lower word
  Addu(dst_low, left_low, right_low);
  Addu(dst_high, left_high, right_high);
  // Check for lower word unsigned overflow
  Sltu(kScratchReg, dst_low, left_low);
  Sltu(kScratchReg2, dst_low, right_low);
  Or(kScratchReg, kScratchReg2, kScratchReg);
  Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
  // Increment higher word if there was overflow
  Addu(dst_high, dst_high, 0x1);
  bind(&no_overflow);
}

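// The carry detection in AddPair uses the unsigned-wrap property of 32-bit
// addition: the low-word sum is smaller than an addend iff the add
// overflowed. A worked example (values are illustrative):
//
//   left_low = 0xFFFFFFFE, right_low = 0x00000003
//   dst_low  = 0x00000001                // wrapped around
//   dst_low < right_low -> 1, so dst_high is incremented.
//
// Comparing against both addends and ORing the results plausibly keeps the
// check correct even when dst_low aliases one input register: the test
// against the other addend still observes the wrap.
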
void MacroAssembler::SubPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high) {
  Label no_overflow;
  Register kScratchReg = s3;
  // Subtract lower word
  Subu(dst_low, left_low, right_low);
  Subu(dst_high, left_high, right_high);
  // Check for lower word unsigned underflow
  Sltu(kScratchReg, left_low, right_low);
  Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
  // Decrement higher word if there was underflow
  Subu(dst_high, dst_high, 0x1);
  bind(&no_overflow);
}

void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  Label less_than_32;
  Label zero_shift;
  Label word_shift;
  Label done;
  Register kScratchReg = s3;
  And(shift, shift, 0x3F);
  li(kScratchReg, 0x20);
  Branch(&less_than_32, lt, shift, Operand(kScratchReg));

  Branch(&word_shift, eq, shift, Operand(kScratchReg));
  // Shift more than 32
  Subu(kScratchReg, shift, kScratchReg);
  mov(dst_low, zero_reg);
  sllv(dst_high, src_low, kScratchReg);
  Branch(&done);
  // Word shift
  bind(&word_shift);
  mov(dst_low, zero_reg);
  mov(dst_high, src_low);
  Branch(&done);

  bind(&less_than_32);
  // Check if zero shift
  Branch(&zero_shift, eq, shift, Operand(zero_reg));
  // Shift less than 32
  Subu(kScratchReg, kScratchReg, shift);
  sllv(dst_high, src_high, shift);
  sllv(dst_low, src_low, shift);
  srlv(kScratchReg, src_low, kScratchReg);
  Or(dst_high, dst_high, kScratchReg);
  Branch(&done);
  // Zero shift
  bind(&zero_shift);
  mov(dst_low, src_low);
  mov(dst_high, src_high);
  bind(&done);
}

void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  Register kScratchReg = s3;
  shift = shift & 0x3F;
  if (shift < 32) {
    if (shift == 0) {
      mov(dst_low, src_low);
      mov(dst_high, src_high);
    } else {
      sll(dst_high, src_high, shift);
      sll(dst_low, src_low, shift);
      shift = 32 - shift;
      srl(kScratchReg, src_low, shift);
      Or(dst_high, dst_high, kScratchReg);
    }
  } else {
    if (shift == 32) {
      mov(dst_low, zero_reg);
      mov(dst_high, src_low);
    } else {
      shift = shift - 32;
      mov(dst_low, zero_reg);
      sll(dst_high, src_low, shift);
    }
  }
}

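// The constant-shift ShlPair above splits a 64-bit left shift into the three
// standard cases. For 0 < shift < 32 the result is (a sketch in C-style
// pseudocode, assuming the usual low/high pair representation):
//
//   dst_high = (src_high << shift) | (src_low >> (32 - shift));
//   dst_low  = src_low << shift;
//
// For shift == 32 the low word simply moves into the high word, and for
// shift > 32 the low word is shifted by (shift - 32) into the high word
// while dst_low becomes zero. ShrPair and SarPair below mirror the same
// three-way split for logical and arithmetic right shifts.
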
ShrPair(Register dst_low,Register dst_high,Register src_low,Register src_high,Register shift)1563 void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
1564                              Register src_low, Register src_high,
1565                              Register shift) {
1566   Label less_than_32;
1567   Label zero_shift;
1568   Label word_shift;
1569   Label done;
1570   Register kScratchReg = s3;
1571   And(shift, shift, 0x3F);
1572   li(kScratchReg, 0x20);
1573   Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1574 
1575   Branch(&word_shift, eq, shift, Operand(kScratchReg));
1576   // Shift more than 32
1577   Subu(kScratchReg, shift, kScratchReg);
1578   mov(dst_high, zero_reg);
1579   srlv(dst_low, src_high, kScratchReg);
1580   Branch(&done);
1581   // Word shift
1582   bind(&word_shift);
1583   mov(dst_high, zero_reg);
1584   mov(dst_low, src_high);
1585   Branch(&done);
1586 
1587   bind(&less_than_32);
1588   // Check if zero shift
1589   Branch(&zero_shift, eq, shift, Operand(zero_reg));
1590   // Shift less than 32
1591   Subu(kScratchReg, kScratchReg, shift);
1592   srlv(dst_high, src_high, shift);
1593   srlv(dst_low, src_low, shift);
1594   sllv(kScratchReg, src_high, kScratchReg);
1595   Or(dst_low, dst_low, kScratchReg);
1596   Branch(&done);
1597   // Zero shift
1598   bind(&zero_shift);
1599   mov(dst_low, src_low);
1600   mov(dst_high, src_high);
1601   bind(&done);
1602 }
1603 
ShrPair(Register dst_low,Register dst_high,Register src_low,Register src_high,uint32_t shift)1604 void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
1605                              Register src_low, Register src_high,
1606                              uint32_t shift) {
1607   Register kScratchReg = s3;
1608   shift = shift & 0x3F;
1609   if (shift < 32) {
1610     if (shift == 0) {
1611       mov(dst_low, src_low);
1612       mov(dst_high, src_high);
1613     } else {
1614       srl(dst_high, src_high, shift);
1615       srl(dst_low, src_low, shift);
1616       shift = 32 - shift;
1617       sll(kScratchReg, src_high, shift);
1618       Or(dst_low, dst_low, kScratchReg);
1619     }
1620   } else {
1621     if (shift == 32) {
1622       mov(dst_high, zero_reg);
1623       mov(dst_low, src_high);
1624     } else {
1625       shift = shift - 32;
1626       mov(dst_high, zero_reg);
1627       srl(dst_low, src_high, shift);
1628     }
1629   }
1630 }
1631 
SarPair(Register dst_low,Register dst_high,Register src_low,Register src_high,Register shift)1632 void MacroAssembler::SarPair(Register dst_low, Register dst_high,
1633                              Register src_low, Register src_high,
1634                              Register shift) {
1635   Label less_than_32;
1636   Label zero_shift;
1637   Label word_shift;
1638   Label done;
1639   Register kScratchReg = s3;
1640   Register kScratchReg2 = s4;
1641   And(shift, shift, 0x3F);
1642   li(kScratchReg, 0x20);
1643   Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1644 
1645   Branch(&word_shift, eq, shift, Operand(kScratchReg));
1646 
1647   // Shift more than 32
1648   li(kScratchReg2, 0x1F);
1649   Subu(kScratchReg, shift, kScratchReg);
1650   srav(dst_high, src_high, kScratchReg2);
1651   srav(dst_low, src_high, kScratchReg);
1652   Branch(&done);
1653   // Word shift
1654   bind(&word_shift);
1655   li(kScratchReg2, 0x1F);
1656   srav(dst_high, src_high, kScratchReg2);
1657   mov(dst_low, src_high);
1658   Branch(&done);
1659 
1660   bind(&less_than_32);
1661   // Check if zero shift
1662   Branch(&zero_shift, eq, shift, Operand(zero_reg));
1663 
1664   // Shift less than 32
1665   Subu(kScratchReg, kScratchReg, shift);
1666   srav(dst_high, src_high, shift);
1667   srlv(dst_low, src_low, shift);
1668   sllv(kScratchReg, src_high, kScratchReg);
1669   Or(dst_low, dst_low, kScratchReg);
1670   Branch(&done);
1671   // Zero shift
1672   bind(&zero_shift);
1673   mov(dst_low, src_low);
1674   mov(dst_high, src_high);
1675   bind(&done);
1676 }
1677 
1678 void MacroAssembler::SarPair(Register dst_low, Register dst_high,
1679                              Register src_low, Register src_high,
1680                              uint32_t shift) {
1681   Register kScratchReg = s3;
1682   shift = shift & 0x3F;
1683   if (shift < 32) {
1684     if (shift == 0) {
1685       mov(dst_low, src_low);
1686       mov(dst_high, src_high);
1687     } else {
1688       sra(dst_high, src_high, shift);
1689       srl(dst_low, src_low, shift);
1690       shift = 32 - shift;
1691       sll(kScratchReg, src_high, shift);
1692       Or(dst_low, dst_low, kScratchReg);
1693     }
1694   } else {
1695     if (shift == 32) {
1696       sra(dst_high, src_high, 31);
1697       mov(dst_low, src_high);
1698     } else {
1699       shift = shift - 32;
1700       sra(dst_high, src_high, 31);
1701       sra(dst_low, src_high, shift);
1702     }
1703   }
1704 }
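// Editor's note (not in the original source): for shifts >= 32, SarPair must
// fill the high word with copies of the sign bit (sra by 31) and shift the
// old high word arithmetically into the low word. An illustrative check,
// assuming int32_t semantics and n = 40 with src_high = 0xFF000000:
//
//   int32_t hi_out = int32_t(0xFF000000) >> 31;        // 0xFFFFFFFF
//   int32_t lo_out = int32_t(0xFF000000) >> (40 - 32); // 0xFFFF0000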
1705 
1706 void MacroAssembler::Ext(Register rt,
1707                          Register rs,
1708                          uint16_t pos,
1709                          uint16_t size) {
1710   DCHECK(pos < 32);
1711   DCHECK(pos + size < 33);
1712 
1713   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1714     ext_(rt, rs, pos, size);
1715   } else {
1716     // Move rs to rt and shift it left then right to get the
1717     // desired bitfield on the right side and zeroes on the left.
1718     int shift_left = 32 - (pos + size);
1719     sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
1720 
1721     int shift_right = 32 - size;
1722     if (shift_right > 0) {
1723       srl(rt, rt, shift_right);
1724     }
1725   }
1726 }
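// Editor's note (not in the original source): the pre-r2 fallback above
// extracts bits [pos, pos + size - 1] with two shifts. A worked example,
// assuming uint32_t semantics with pos = 8, size = 4, rs = 0x00000F00:
//
//   uint32_t t = 0x00000F00u << (32 - (8 + 4));  // 0xF0000000
//   t >>= (32 - 4);                              // 0x0000000F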
1727 
1728 
1729 void MacroAssembler::Ins(Register rt,
1730                          Register rs,
1731                          uint16_t pos,
1732                          uint16_t size) {
1733   DCHECK(pos < 32);
1734   DCHECK(pos + size <= 32);
1735   DCHECK(size != 0);
1736 
1737   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1738     ins_(rt, rs, pos, size);
1739   } else {
1740     DCHECK(!rt.is(t8) && !rs.is(t8));
1741     Subu(at, zero_reg, Operand(1));
1742     srl(at, at, 32 - size);
1743     and_(t8, rs, at);
1744     sll(t8, t8, pos);
1745     sll(at, at, pos);
1746     nor(at, at, zero_reg);
1747     and_(at, rt, at);
1748     or_(rt, t8, at);
1749   }
1750 }
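// Editor's note (not in the original source): the fallback above builds a
// field mask and merges rs into rt. The same computation as a hedged C++
// sketch, assuming uint32_t operands:
//
//   uint32_t mask  = (0xFFFFFFFFu >> (32 - size)) << pos;  // ones at field
//   uint32_t field = (rs << pos) & mask;                   // bits to insert
//   rt = (rt & ~mask) | field;                             // keep the rest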
1751 
1752 void MacroAssembler::Seb(Register rd, Register rt) {
1753   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1754     seb(rd, rt);
1755   } else {
1756     DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
1757     sll(rd, rt, 24);
1758     sra(rd, rd, 24);
1759   }
1760 }
1761 
1762 void MacroAssembler::Seh(Register rd, Register rt) {
1763   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1764     seh(rd, rt);
1765   } else {
1766     DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
1767     sll(rd, rt, 16);
1768     sra(rd, rd, 16);
1769   }
1770 }
1771 
1772 void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
1773   if (IsMipsArchVariant(kMips32r6)) {
1774     // r6 neg_s changes the sign for NaN-like operands as well.
1775     neg_s(fd, fs);
1776   } else {
1777     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1778            IsMipsArchVariant(kLoongson));
1779     Label is_nan, done;
1780     Register scratch1 = t8;
1781     Register scratch2 = t9;
1782     BranchF32(nullptr, &is_nan, eq, fs, fs);
1783     Branch(USE_DELAY_SLOT, &done);
1784     // For NaN input, neg_s will return the same NaN value,
1785     // while the sign has to be changed separately.
1786     neg_s(fd, fs);  // In delay slot.
1787     bind(&is_nan);
1788     mfc1(scratch1, fs);
1789     And(scratch2, scratch1, Operand(~kBinary32SignMask));
1790     And(scratch1, scratch1, Operand(kBinary32SignMask));
1791     Xor(scratch1, scratch1, Operand(kBinary32SignMask));
1792     Or(scratch2, scratch2, scratch1);
1793     mtc1(scratch2, fd);
1794     bind(&done);
1795   }
1796 }
1797 
1798 void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
1799   if (IsMipsArchVariant(kMips32r6)) {
1800     // r6 neg_d changes the sign for NaN-like operands as well.
1801     neg_d(fd, fs);
1802   } else {
1803     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1804            IsMipsArchVariant(kLoongson));
1805     Label is_nan, done;
1806     Register scratch1 = t8;
1807     Register scratch2 = t9;
1808     BranchF64(nullptr, &is_nan, eq, fs, fs);
1809     Branch(USE_DELAY_SLOT, &done);
1810     // For NaN input, neg_d will return the same NaN value,
1811     // while the sign has to be changed separately.
1812     neg_d(fd, fs);  // In delay slot.
1813     bind(&is_nan);
1814     Mfhc1(scratch1, fs);
1815     And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
1816     And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
1817     Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
1818     Or(scratch2, scratch2, scratch1);
1819     Mthc1(scratch2, fd);
1820     bind(&done);
1821   }
1822 }
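// Editor's note (not in the original source): on the pre-r6 NaN paths above,
// the And/Xor/Or sequence amounts to flipping only the sign bit of the raw
// bit pattern, which is exactly IEEE 754 negation even for NaNs:
//
//   bits ^= sign_mask;  // illustrative: result = -input, payload preserved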
1823 
1824 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
1825                               FPURegister scratch) {
1826   // In FP64 mode we do the conversion from long.
1827   if (IsFp64Mode()) {
1828     mtc1(rs, scratch);
1829     Mthc1(zero_reg, scratch);
1830     cvt_d_l(fd, scratch);
1831   } else {
1832     // Convert rs to a FP value in fd.
1833     DCHECK(!fd.is(scratch));
1834     DCHECK(!rs.is(at));
1835 
1836     Label msb_clear, conversion_done;
1837     // For a value which is < 2^31, regard it as a signed positive word.
1838     Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
1839     mtc1(rs, fd);
1840 
1841     li(at, 0x41F00000);  // FP value: 2^32.
1842 
1843     // For unsigned inputs > 2^31, we convert to double as a signed int32,
1844     // then add 2^32 to move it back to an unsigned value in range 2^31..2^32-1.
1845     mtc1(zero_reg, scratch);
1846     Mthc1(at, scratch);
1847 
1848     cvt_d_w(fd, fd);
1849 
1850     Branch(USE_DELAY_SLOT, &conversion_done);
1851     add_d(fd, fd, scratch);
1852 
1853     bind(&msb_clear);
1854     cvt_d_w(fd, fd);
1855 
1856     bind(&conversion_done);
1857   }
1858 }
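// Editor's note (not in the original source): in the FP32-mode path above,
// an input with the MSB set is first converted as a *signed* int32 (yielding
// value - 2^32) and then 2^32 is added back; 0x41F00000 is the high word of
// the double 2^32. Worked example: rs = 0x80000000 converts to -2147483648.0,
// and adding 4294967296.0 gives the intended 2147483648.0.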
1859 
1860 
1861 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1862                                 FPURegister fs,
1863                                 FPURegister scratch) {
1864   Trunc_uw_d(fs, t8, scratch);
1865   mtc1(t8, fd);
1866 }
1867 
1868 void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
1869                                 FPURegister scratch) {
1870   Trunc_uw_s(fs, t8, scratch);
1871   mtc1(t8, fd);
1872 }
1873 
1874 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1875   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1876     Mfhc1(t8, fs);
1877     trunc_w_d(fd, fs);
1878     Mthc1(t8, fs);
1879   } else {
1880     trunc_w_d(fd, fs);
1881   }
1882 }
1883 
1884 
1885 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1886   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1887     Mfhc1(t8, fs);
1888     round_w_d(fd, fs);
1889     Mthc1(t8, fs);
1890   } else {
1891     round_w_d(fd, fs);
1892   }
1893 }
1894 
1895 
1896 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1897   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1898     Mfhc1(t8, fs);
1899     floor_w_d(fd, fs);
1900     Mthc1(t8, fs);
1901   } else {
1902     floor_w_d(fd, fs);
1903   }
1904 }
1905 
1906 
1907 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1908   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1909     Mfhc1(t8, fs);
1910     ceil_w_d(fd, fs);
1911     Mthc1(t8, fs);
1912   } else {
1913     ceil_w_d(fd, fs);
1914   }
1915 }
1916 
1917 
1918 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1919                                 Register rs,
1920                                 FPURegister scratch) {
1921   DCHECK(!fd.is(scratch));
1922   DCHECK(!rs.is(at));
1923 
1924   // Load 2^31 into scratch as its float representation.
1925   li(at, 0x41E00000);
1926   mtc1(zero_reg, scratch);
1927   Mthc1(at, scratch);
1928   // Test if scratch > fd.
1929   // If fd < 2^31 we can convert it normally.
1930   Label simple_convert;
1931   BranchF(&simple_convert, NULL, lt, fd, scratch);
1932 
1933   // First we subtract 2^31 from fd, then trunc it to rs
1934   // and add 2^31 to rs.
1935   sub_d(scratch, fd, scratch);
1936   trunc_w_d(scratch, scratch);
1937   mfc1(rs, scratch);
1938   Or(rs, rs, 1 << 31);
1939 
1940   Label done;
1941   Branch(&done);
1942   // Simple conversion.
1943   bind(&simple_convert);
1944   trunc_w_d(scratch, fd);
1945   mfc1(rs, scratch);
1946 
1947   bind(&done);
1948 }
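// Editor's note (not in the original source): inputs >= 2^31 cannot be
// truncated directly with the signed trunc_w_d, so the slow path above
// subtracts 2^31 first and ORs the MSB back in afterwards. Arithmetic check:
//
//   fd = 3000000000.0: 3000000000 - 2147483648 = 852516352 (fits in int32);
//   852516352 | 0x80000000 == 3000000000 as an unsigned 32-bit value.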
1949 
1950 void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
1951                                 FPURegister scratch) {
1952   DCHECK(!fd.is(scratch));
1953   DCHECK(!rs.is(at));
1954 
1955   // Load 2^31 into scratch as its float representation.
1956   li(at, 0x4F000000);
1957   mtc1(at, scratch);
1958   // Test if scratch > fd.
1959   // If fd < 2^31 we can convert it normally.
1960   Label simple_convert;
1961   BranchF32(&simple_convert, NULL, lt, fd, scratch);
1962 
1963   // First we subtract 2^31 from fd, then trunc it to rs
1964   // and add 2^31 to rs.
1965   sub_s(scratch, fd, scratch);
1966   trunc_w_s(scratch, scratch);
1967   mfc1(rs, scratch);
1968   Or(rs, rs, 1 << 31);
1969 
1970   Label done;
1971   Branch(&done);
1972   // Simple conversion.
1973   bind(&simple_convert);
1974   trunc_w_s(scratch, fd);
1975   mfc1(rs, scratch);
1976 
1977   bind(&done);
1978 }
1979 
1980 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1981   if (IsFp32Mode()) {
1982     mtc1(rt, fs.high());
1983   } else {
1984     DCHECK(IsFp64Mode() || IsFpxxMode());
1985     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1986     mthc1(rt, fs);
1987   }
1988 }
1989 
1990 
1991 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1992   if (IsFp32Mode()) {
1993     mfc1(rt, fs.high());
1994   } else {
1995     DCHECK(IsFp64Mode() || IsFpxxMode());
1996     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1997     mfhc1(rt, fs);
1998   }
1999 }
2000 
2001 void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2002                             FPURegister ft, FPURegister scratch) {
2003   if (IsMipsArchVariant(kMips32r2)) {
2004     madd_s(fd, fr, fs, ft);
2005   } else {
2006     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2007     mul_s(scratch, fs, ft);
2008     add_s(fd, fr, scratch);
2009   }
2010 }
2011 
2012 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2013                             FPURegister ft, FPURegister scratch) {
2014   if (IsMipsArchVariant(kMips32r2)) {
2015     madd_d(fd, fr, fs, ft);
2016   } else {
2017     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2018     mul_d(scratch, fs, ft);
2019     add_d(fd, fr, scratch);
2020   }
2021 }
2022 
2023 void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2024                             FPURegister ft, FPURegister scratch) {
2025   if (IsMipsArchVariant(kMips32r2)) {
2026     msub_s(fd, fr, fs, ft);
2027   } else {
2028     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2029     mul_s(scratch, fs, ft);
2030     sub_s(fd, scratch, fr);
2031   }
2032 }
2033 
2034 void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2035                             FPURegister ft, FPURegister scratch) {
2036   if (IsMipsArchVariant(kMips32r2)) {
2037     msub_d(fd, fr, fs, ft);
2038   } else {
2039     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2040     mul_d(scratch, fs, ft);
2041     sub_d(fd, scratch, fr);
2042   }
2043 }
2044 
2045 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2046                                    Label* nan, Condition cond, FPURegister cmp1,
2047                                    FPURegister cmp2, BranchDelaySlot bd) {
2048   {
2049     BlockTrampolinePoolScope block_trampoline_pool(this);
2050     if (cond == al) {
2051       Branch(bd, target);
2052       return;
2053     }
2054 
2055     if (IsMipsArchVariant(kMips32r6)) {
2056       sizeField = sizeField == D ? L : W;
2057     }
2058     DCHECK(nan || target);
2059     // Check for unordered (NaN) cases.
2060     if (nan) {
2061       bool long_branch =
2062           nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
2063       if (!IsMipsArchVariant(kMips32r6)) {
2064         if (long_branch) {
2065           Label skip;
2066           c(UN, sizeField, cmp1, cmp2);
2067           bc1f(&skip);
2068           nop();
2069           BranchLong(nan, bd);
2070           bind(&skip);
2071         } else {
2072           c(UN, sizeField, cmp1, cmp2);
2073           bc1t(nan);
2074           if (bd == PROTECT) {
2075             nop();
2076           }
2077         }
2078       } else {
2079         // Use kDoubleCompareReg for the comparison result. It has to be
2080         // unavailable to the Lithium register allocator.
2081         DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2082         if (long_branch) {
2083           Label skip;
2084           cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2085           bc1eqz(&skip, kDoubleCompareReg);
2086           nop();
2087           BranchLong(nan, bd);
2088           bind(&skip);
2089         } else {
2090           cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2091           bc1nez(nan, kDoubleCompareReg);
2092           if (bd == PROTECT) {
2093             nop();
2094           }
2095         }
2096       }
2097     }
2098 
2099     if (target) {
2100       bool long_branch =
2101           target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2102       if (long_branch) {
2103         Label skip;
2104         Condition neg_cond = NegateFpuCondition(cond);
2105         BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2106         BranchLong(target, bd);
2107         bind(&skip);
2108       } else {
2109         BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2110       }
2111     }
2112   }
2113 }
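// Editor's note (not in the original source): when the target may be out of
// short-branch range, BranchFCommon uses the standard inversion idiom, shown
// schematically below with an illustrative label:
//
//   BranchShortF(neg_cond, &skip);  // hop over the long branch if not taken
//   BranchLong(target);
//   bind(&skip);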
2114 
2115 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2116                                   Condition cc, FPURegister cmp1,
2117                                   FPURegister cmp2, BranchDelaySlot bd) {
2118   if (!IsMipsArchVariant(kMips32r6)) {
2119     BlockTrampolinePoolScope block_trampoline_pool(this);
2120     if (target) {
2121       // Here NaN cases were either handled by this function or are assumed to
2122       // have been handled by the caller.
2123       switch (cc) {
2124         case lt:
2125           c(OLT, sizeField, cmp1, cmp2);
2126           bc1t(target);
2127           break;
2128         case ult:
2129           c(ULT, sizeField, cmp1, cmp2);
2130           bc1t(target);
2131           break;
2132         case gt:
2133           c(ULE, sizeField, cmp1, cmp2);
2134           bc1f(target);
2135           break;
2136         case ugt:
2137           c(OLE, sizeField, cmp1, cmp2);
2138           bc1f(target);
2139           break;
2140         case ge:
2141           c(ULT, sizeField, cmp1, cmp2);
2142           bc1f(target);
2143           break;
2144         case uge:
2145           c(OLT, sizeField, cmp1, cmp2);
2146           bc1f(target);
2147           break;
2148         case le:
2149           c(OLE, sizeField, cmp1, cmp2);
2150           bc1t(target);
2151           break;
2152         case ule:
2153           c(ULE, sizeField, cmp1, cmp2);
2154           bc1t(target);
2155           break;
2156         case eq:
2157           c(EQ, sizeField, cmp1, cmp2);
2158           bc1t(target);
2159           break;
2160         case ueq:
2161           c(UEQ, sizeField, cmp1, cmp2);
2162           bc1t(target);
2163           break;
2164         case ne:  // Unordered or not equal.
2165           c(EQ, sizeField, cmp1, cmp2);
2166           bc1f(target);
2167           break;
2168         case ogl:
2169           c(UEQ, sizeField, cmp1, cmp2);
2170           bc1f(target);
2171           break;
2172         default:
2173           CHECK(0);
2174       }
2175     }
2176   } else {
2177     BlockTrampolinePoolScope block_trampoline_pool(this);
2178     if (target) {
2179       // Here NaN cases were either handled by this function or are assumed to
2180       // have been handled by the caller.
2181       // Unsigned conditions are treated as their signed counterpart.
2182       // Use kDoubleCompareReg for the comparison result; it is valid in
2183       // FP64 (FR = 1) mode, which is implied for mips32r6.
2184       DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2185       switch (cc) {
2186         case lt:
2187           cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2188           bc1nez(target, kDoubleCompareReg);
2189           break;
2190         case ult:
2191           cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2192           bc1nez(target, kDoubleCompareReg);
2193           break;
2194         case gt:
2195           cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2196           bc1eqz(target, kDoubleCompareReg);
2197           break;
2198         case ugt:
2199           cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2200           bc1eqz(target, kDoubleCompareReg);
2201           break;
2202         case ge:
2203           cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2204           bc1eqz(target, kDoubleCompareReg);
2205           break;
2206         case uge:
2207           cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2208           bc1eqz(target, kDoubleCompareReg);
2209           break;
2210         case le:
2211           cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2212           bc1nez(target, kDoubleCompareReg);
2213           break;
2214         case ule:
2215           cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2216           bc1nez(target, kDoubleCompareReg);
2217           break;
2218         case eq:
2219           cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2220           bc1nez(target, kDoubleCompareReg);
2221           break;
2222         case ueq:
2223           cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2224           bc1nez(target, kDoubleCompareReg);
2225           break;
2226         case ne:
2227           cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2228           bc1eqz(target, kDoubleCompareReg);
2229           break;
2230         case ogl:
2231           cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2232           bc1eqz(target, kDoubleCompareReg);
2233           break;
2234         default:
2235           CHECK(0);
2236       }
2237     }
2238   }
2239   if (bd == PROTECT) {
2240     nop();
2241   }
2242 }
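// Editor's note (not in the original source): the cases above synthesize the
// missing predicates by negating the branch sense, e.g. (a > b) is emitted
// as "compare ULE, branch if false", since !(a <= b || unordered) == a > b.
// Summary of the inverted cases:
//
//   gt -> !ULE    ugt -> !OLE    ge -> !ULT
//   uge -> !OLT   ne -> !EQ      ogl -> !UEQ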
2243 
2244 
2245 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2246   if (IsFp32Mode()) {
2247     mtc1(src_low, dst);
2248   } else {
2249     DCHECK(IsFp64Mode() || IsFpxxMode());
2250     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2251     DCHECK(!src_low.is(at));
2252     mfhc1(at, dst);
2253     mtc1(src_low, dst);
2254     mthc1(at, dst);
2255   }
2256 }
2257 
2258 
2259 void MacroAssembler::Move(FPURegister dst, float imm) {
2260   li(at, Operand(bit_cast<int32_t>(imm)));
2261   mtc1(at, dst);
2262 }
2263 
2264 
2265 void MacroAssembler::Move(FPURegister dst, double imm) {
2266   int64_t imm_bits = bit_cast<int64_t>(imm);
2267   // Handle special values first.
2268   if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
2269     mov_d(dst, kDoubleRegZero);
2270   } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
2271     Neg_d(dst, kDoubleRegZero);
2272   } else {
2273     uint32_t lo, hi;
2274     DoubleAsTwoUInt32(imm, &lo, &hi);
2275     // Move the low part of the double into the lower half of the
2276     // corresponding FPU register pair.
2277     if (lo != 0) {
2278       li(at, Operand(lo));
2279       mtc1(at, dst);
2280     } else {
2281       mtc1(zero_reg, dst);
2282     }
2283     // Move the high part of the double into the upper half of the
2284     // corresponding FPU register pair.
2285     if (hi != 0) {
2286       li(at, Operand(hi));
2287       Mthc1(at, dst);
2288     } else {
2289       Mthc1(zero_reg, dst);
2290     }
2291     if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2292   }
2293 }
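// Editor's note (not in the original source): DoubleAsTwoUInt32 splits the
// IEEE 754 bit pattern into the two 32-bit halves loaded above. For example,
// 1.0 has hi = 0x3FF00000 and lo = 0x00000000, so only the high half needs a
// li; the low half is written straight from zero_reg.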
2294 
2295 
2296 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2297   if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2298     Label done;
2299     Branch(&done, ne, rt, Operand(zero_reg));
2300     mov(rd, rs);
2301     bind(&done);
2302   } else {
2303     movz(rd, rs, rt);
2304   }
2305 }
2306 
2307 
2308 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2309   if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2310     Label done;
2311     Branch(&done, eq, rt, Operand(zero_reg));
2312     mov(rd, rs);
2313     bind(&done);
2314   } else {
2315     movn(rd, rs, rt);
2316   }
2317 }
2318 
2319 
2320 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2321   if (IsMipsArchVariant(kLoongson)) {
2322     // Tests an FP condition code and then conditionally moves rs to rd.
2323     // We do not currently use any FPU cc bit other than bit 0.
2324     DCHECK(cc == 0);
2325     DCHECK(!(rs.is(t8) || rd.is(t8)));
2326     Label done;
2327     Register scratch = t8;
2328     // For testing purposes we need to fetch the contents of the FCSR
2329     // register and then test its cc (floating point condition code) bit
2330     // (for cc = 0 this is bit 23, i.e. the 24th bit, of the FCSR).
2331     cfc1(scratch, FCSR);
2332     // For the MIPS I, II and III architectures, the contents of scratch
2333     // are UNPREDICTABLE for the instruction immediately following CFC1.
2334     nop();
2335     srl(scratch, scratch, 16);
2336     andi(scratch, scratch, 0x0080);
2337     Branch(&done, eq, scratch, Operand(zero_reg));
2338     mov(rd, rs);
2339     bind(&done);
2340   } else {
2341     movt(rd, rs, cc);
2342   }
2343 }
2344 
2345 
2346 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2347   if (IsMipsArchVariant(kLoongson)) {
2348     // Tests an FP condition code and then conditionally moves rs to rd.
2349     // We do not currently use any FPU cc bit other than bit 0.
2350     DCHECK(cc == 0);
2351     DCHECK(!(rs.is(t8) || rd.is(t8)));
2352     Label done;
2353     Register scratch = t8;
2354     // For testing purposes we need to fetch the contents of the FCSR
2355     // register and then test its cc (floating point condition code) bit
2356     // (for cc = 0 this is bit 23, i.e. the 24th bit, of the FCSR).
2357     cfc1(scratch, FCSR);
2358     // For the MIPS I, II and III architectures, the contents of scratch
2359     // are UNPREDICTABLE for the instruction immediately following CFC1.
2360     nop();
2361     srl(scratch, scratch, 16);
2362     andi(scratch, scratch, 0x0080);
2363     Branch(&done, ne, scratch, Operand(zero_reg));
2364     mov(rd, rs);
2365     bind(&done);
2366   } else {
2367     movf(rd, rs, cc);
2368   }
2369 }
2370 
2371 void MacroAssembler::Clz(Register rd, Register rs) {
2372   if (IsMipsArchVariant(kLoongson)) {
2373     DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
2374     Register mask = t8;
2375     Register scratch = t9;
2376     Label loop, end;
2377     mov(at, rs);
2378     mov(rd, zero_reg);
2379     lui(mask, 0x8000);
2380     bind(&loop);
2381     and_(scratch, at, mask);
2382     Branch(&end, ne, scratch, Operand(zero_reg));
2383     addiu(rd, rd, 1);
2384     Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
2385     srl(mask, mask, 1);
2386     bind(&end);
2387   } else {
2388     clz(rd, rs);
2389   }
2390 }
2391 
2392 
2393 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2394                                      Register result,
2395                                      DoubleRegister double_input,
2396                                      Register scratch,
2397                                      DoubleRegister double_scratch,
2398                                      Register except_flag,
2399                                      CheckForInexactConversion check_inexact) {
2400   DCHECK(!result.is(scratch));
2401   DCHECK(!double_input.is(double_scratch));
2402   DCHECK(!except_flag.is(scratch));
2403 
2404   Label done;
2405 
2406   // Clear the except flag (0 = no exception)
2407   mov(except_flag, zero_reg);
2408 
2409   // Test for values that can be exactly represented as a signed 32-bit integer.
2410   cvt_w_d(double_scratch, double_input);
2411   mfc1(result, double_scratch);
2412   cvt_d_w(double_scratch, double_scratch);
2413   BranchF(&done, NULL, eq, double_input, double_scratch);
2414 
2415   int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
2416 
2417   if (check_inexact == kDontCheckForInexactConversion) {
2418     // Ignore inexact exceptions.
2419     except_mask &= ~kFCSRInexactFlagMask;
2420   }
2421 
2422   // Save FCSR.
2423   cfc1(scratch, FCSR);
2424   // Disable FPU exceptions.
2425   ctc1(zero_reg, FCSR);
2426 
2427   // Do operation based on rounding mode.
2428   switch (rounding_mode) {
2429     case kRoundToNearest:
2430       Round_w_d(double_scratch, double_input);
2431       break;
2432     case kRoundToZero:
2433       Trunc_w_d(double_scratch, double_input);
2434       break;
2435     case kRoundToPlusInf:
2436       Ceil_w_d(double_scratch, double_input);
2437       break;
2438     case kRoundToMinusInf:
2439       Floor_w_d(double_scratch, double_input);
2440       break;
2441   }  // End of switch-statement.
2442 
2443   // Retrieve FCSR.
2444   cfc1(except_flag, FCSR);
2445   // Restore FCSR.
2446   ctc1(scratch, FCSR);
2447   // Move the converted value into the result register.
2448   mfc1(result, double_scratch);
2449 
2450   // Check for fpu exceptions.
2451   And(except_flag, except_flag, Operand(except_mask));
2452 
2453   bind(&done);
2454 }
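// Editor's note (not in the original source): the fast path above round-trips
// the value through cvt_w_d / cvt_d_w; if the result compares equal to the
// input, the double was exactly representable as an int32 and no FCSR
// inspection is needed. Otherwise the rounding instruction's accumulated
// FCSR exception bits are masked into except_flag, e.g. (illustrative):
//
//   except_flag &= kFCSRFlagMask & ~kFCSRInexactFlagMask;  // kDontCheck case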
2455 
2456 
2457 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2458                                                 DoubleRegister double_input,
2459                                                 Label* done) {
2460   DoubleRegister single_scratch = kLithiumScratchDouble.low();
2461   Register scratch = at;
2462   Register scratch2 = t9;
2463 
2464   // Clear cumulative exception flags and save the FCSR.
2465   cfc1(scratch2, FCSR);
2466   ctc1(zero_reg, FCSR);
2467   // Try a conversion to a signed integer.
2468   trunc_w_d(single_scratch, double_input);
2469   mfc1(result, single_scratch);
2470   // Retrieve and restore the FCSR.
2471   cfc1(scratch, FCSR);
2472   ctc1(scratch2, FCSR);
2473   // Check for overflow and NaNs.
2474   And(scratch,
2475       scratch,
2476       kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2477   // If we had no exceptions we are done.
2478   Branch(done, eq, scratch, Operand(zero_reg));
2479 }
2480 
2481 
2482 void MacroAssembler::TruncateDoubleToI(Register result,
2483                                        DoubleRegister double_input) {
2484   Label done;
2485 
2486   TryInlineTruncateDoubleToI(result, double_input, &done);
2487 
2488   // If we fell through, the inline version didn't succeed; call the stub instead.
2489   push(ra);
2490   Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2491   sdc1(double_input, MemOperand(sp, 0));
2492 
2493   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2494   CallStub(&stub);
2495 
2496   Addu(sp, sp, Operand(kDoubleSize));
2497   pop(ra);
2498 
2499   bind(&done);
2500 }
2501 
2502 
2503 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2504   Label done;
2505   DoubleRegister double_scratch = f12;
2506   DCHECK(!result.is(object));
2507 
2508   ldc1(double_scratch,
2509        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2510   TryInlineTruncateDoubleToI(result, double_scratch, &done);
2511 
2512   // If we fell through, the inline version didn't succeed; call the stub instead.
2513   push(ra);
2514   DoubleToIStub stub(isolate(),
2515                      object,
2516                      result,
2517                      HeapNumber::kValueOffset - kHeapObjectTag,
2518                      true,
2519                      true);
2520   CallStub(&stub);
2521   pop(ra);
2522 
2523   bind(&done);
2524 }
2525 
2526 
2527 void MacroAssembler::TruncateNumberToI(Register object,
2528                                        Register result,
2529                                        Register heap_number_map,
2530                                        Register scratch,
2531                                        Label* not_number) {
2532   Label done;
2533   DCHECK(!result.is(object));
2534 
2535   UntagAndJumpIfSmi(result, object, &done);
2536   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2537   TruncateHeapNumberToI(result, object);
2538 
2539   bind(&done);
2540 }
2541 
2542 
2543 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2544                                          Register src,
2545                                          int num_least_bits) {
2546   Ext(dst, src, kSmiTagSize, num_least_bits);
2547 }
2548 
2549 
2550 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2551                                            Register src,
2552                                            int num_least_bits) {
2553   And(dst, src, Operand((1 << num_least_bits) - 1));
2554 }
2555 
2556 
2557 // Emulated conditional branches do not emit a nop in the branch delay slot.
2558 //
2559 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2560 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
2561     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
2562     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2563 
2564 
2565 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2566   DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
2567   BranchShort(offset, bdslot);
2568 }
2569 
2570 
2571 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2572                             const Operand& rt, BranchDelaySlot bdslot) {
2573   bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2574   DCHECK(is_near);
2575   USE(is_near);
2576 }
2577 
2578 
2579 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2580   if (L->is_bound()) {
2581     if (is_near_branch(L)) {
2582       BranchShort(L, bdslot);
2583     } else {
2584       BranchLong(L, bdslot);
2585     }
2586   } else {
2587     if (is_trampoline_emitted()) {
2588       BranchLong(L, bdslot);
2589     } else {
2590       BranchShort(L, bdslot);
2591     }
2592   }
2593 }
2594 
2595 
2596 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2597                             const Operand& rt,
2598                             BranchDelaySlot bdslot) {
2599   if (L->is_bound()) {
2600     if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2601       if (cond != cc_always) {
2602         Label skip;
2603         Condition neg_cond = NegateCondition(cond);
2604         BranchShort(&skip, neg_cond, rs, rt);
2605         BranchLong(L, bdslot);
2606         bind(&skip);
2607       } else {
2608         BranchLong(L, bdslot);
2609       }
2610     }
2611   } else {
2612     if (is_trampoline_emitted()) {
2613       if (cond != cc_always) {
2614         Label skip;
2615         Condition neg_cond = NegateCondition(cond);
2616         BranchShort(&skip, neg_cond, rs, rt);
2617         BranchLong(L, bdslot);
2618         bind(&skip);
2619       } else {
2620         BranchLong(L, bdslot);
2621       }
2622     } else {
2623       BranchShort(L, cond, rs, rt, bdslot);
2624     }
2625   }
2626 }
2627 
2628 
2629 void MacroAssembler::Branch(Label* L,
2630                             Condition cond,
2631                             Register rs,
2632                             Heap::RootListIndex index,
2633                             BranchDelaySlot bdslot) {
2634   LoadRoot(at, index);
2635   Branch(L, cond, rs, Operand(at), bdslot);
2636 }
2637 
2638 
2639 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2640                                        BranchDelaySlot bdslot) {
2641   DCHECK(L == nullptr || offset == 0);
2642   offset = GetOffset(offset, L, OffsetSize::kOffset16);
2643   b(offset);
2644 
2645   // Emit a nop in the branch delay slot if required.
2646   if (bdslot == PROTECT)
2647     nop();
2648 }
2649 
2650 
2651 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2652   DCHECK(L == nullptr || offset == 0);
2653   offset = GetOffset(offset, L, OffsetSize::kOffset26);
2654   bc(offset);
2655 }
2656 
2657 
2658 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2659   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2660     DCHECK(is_int26(offset));
2661     BranchShortHelperR6(offset, nullptr);
2662   } else {
2663     DCHECK(is_int16(offset));
2664     BranchShortHelper(offset, nullptr, bdslot);
2665   }
2666 }
2667 
2668 
2669 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2670   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2671     BranchShortHelperR6(0, L);
2672   } else {
2673     BranchShortHelper(0, L, bdslot);
2674   }
2675 }
2676 
2677 
2678 static inline bool IsZero(const Operand& rt) {
2679   if (rt.is_reg()) {
2680     return rt.rm().is(zero_reg);
2681   } else {
2682     return rt.immediate() == 0;
2683   }
2684 }
2685 
2686 
2687 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2688   if (L) {
2689     offset = branch_offset_helper(L, bits) >> 2;
2690   } else {
2691     DCHECK(is_intn(offset, bits));
2692   }
2693   return offset;
2694 }
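// Editor's note (not in the original source): MIPS branch offsets are encoded
// in instruction words, not bytes, which is why the byte offset returned by
// branch_offset_helper is shifted right by 2 (every instruction is 4 bytes
// long). For instance, a byte offset of 64 encodes as 64 >> 2 == 16.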
2695 
2696 
2697 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2698                                                Register scratch) {
2699   Register r2 = no_reg;
2700   if (rt.is_reg()) {
2701     r2 = rt.rm_;
2702   } else {
2703     r2 = scratch;
2704     li(r2, rt);
2705   }
2706 
2707   return r2;
2708 }
2709 
2710 
2711 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2712                                          Condition cond, Register rs,
2713                                          const Operand& rt) {
2714   DCHECK(L == nullptr || offset == 0);
2715   Register scratch = rs.is(at) ? t8 : at;
2716   OffsetSize bits = OffsetSize::kOffset16;
2717 
2718   // Be careful to always use shifted_branch_offset only just before the
2719   // branch instruction, as the location will be remembered for patching the
2720   // target.
2721   {
2722     BlockTrampolinePoolScope block_trampoline_pool(this);
2723     switch (cond) {
2724       case cc_always:
2725         bits = OffsetSize::kOffset26;
2726         if (!is_near(L, bits)) return false;
2727         offset = GetOffset(offset, L, bits);
2728         bc(offset);
2729         break;
2730       case eq:
2731         if (rs.code() == rt.rm_.reg_code) {
2732           // Pre R6 beq is used here to make the code patchable. Otherwise bc
2733           // should be used, which has no condition field and so is not patchable.
2734           bits = OffsetSize::kOffset16;
2735           if (!is_near(L, bits)) return false;
2736           scratch = GetRtAsRegisterHelper(rt, scratch);
2737           offset = GetOffset(offset, L, bits);
2738           beq(rs, scratch, offset);
2739           nop();
2740         } else if (IsZero(rt)) {
2741           bits = OffsetSize::kOffset21;
2742           if (!is_near(L, bits)) return false;
2743           offset = GetOffset(offset, L, bits);
2744           beqzc(rs, offset);
2745         } else {
2746           // We don't want any other register but scratch clobbered.
2747           bits = OffsetSize::kOffset16;
2748           if (!is_near(L, bits)) return false;
2749           scratch = GetRtAsRegisterHelper(rt, scratch);
2750           offset = GetOffset(offset, L, bits);
2751           beqc(rs, scratch, offset);
2752         }
2753         break;
2754       case ne:
2755         if (rs.code() == rt.rm_.reg_code) {
2756           // Pre R6 bne is used here to make the code patchable. Otherwise we
2757           // should not generate any instruction.
2758           bits = OffsetSize::kOffset16;
2759           if (!is_near(L, bits)) return false;
2760           scratch = GetRtAsRegisterHelper(rt, scratch);
2761           offset = GetOffset(offset, L, bits);
2762           bne(rs, scratch, offset);
2763           nop();
2764         } else if (IsZero(rt)) {
2765           bits = OffsetSize::kOffset21;
2766           if (!is_near(L, bits)) return false;
2767           offset = GetOffset(offset, L, bits);
2768           bnezc(rs, offset);
2769         } else {
2770           // We don't want any other register but scratch clobbered.
2771           bits = OffsetSize::kOffset16;
2772           if (!is_near(L, bits)) return false;
2773           scratch = GetRtAsRegisterHelper(rt, scratch);
2774           offset = GetOffset(offset, L, bits);
2775           bnec(rs, scratch, offset);
2776         }
2777         break;
2778 
2779       // Signed comparison.
2780       case greater:
2781         // rs > rt
2782         if (rs.code() == rt.rm_.reg_code) {
2783           break;  // No code needs to be emitted.
2784         } else if (rs.is(zero_reg)) {
2785           bits = OffsetSize::kOffset16;
2786           if (!is_near(L, bits)) return false;
2787           scratch = GetRtAsRegisterHelper(rt, scratch);
2788           offset = GetOffset(offset, L, bits);
2789           bltzc(scratch, offset);
2790         } else if (IsZero(rt)) {
2791           bits = OffsetSize::kOffset16;
2792           if (!is_near(L, bits)) return false;
2793           offset = GetOffset(offset, L, bits);
2794           bgtzc(rs, offset);
2795         } else {
2796           bits = OffsetSize::kOffset16;
2797           if (!is_near(L, bits)) return false;
2798           scratch = GetRtAsRegisterHelper(rt, scratch);
2799           DCHECK(!rs.is(scratch));
2800           offset = GetOffset(offset, L, bits);
2801           bltc(scratch, rs, offset);
2802         }
2803         break;
2804       case greater_equal:
2805         // rs >= rt
2806         if (rs.code() == rt.rm_.reg_code) {
2807           bits = OffsetSize::kOffset26;
2808           if (!is_near(L, bits)) return false;
2809           offset = GetOffset(offset, L, bits);
2810           bc(offset);
2811         } else if (rs.is(zero_reg)) {
2812           bits = OffsetSize::kOffset16;
2813           if (!is_near(L, bits)) return false;
2814           scratch = GetRtAsRegisterHelper(rt, scratch);
2815           offset = GetOffset(offset, L, bits);
2816           blezc(scratch, offset);
2817         } else if (IsZero(rt)) {
2818           bits = OffsetSize::kOffset16;
2819           if (!is_near(L, bits)) return false;
2820           offset = GetOffset(offset, L, bits);
2821           bgezc(rs, offset);
2822         } else {
2823           bits = OffsetSize::kOffset16;
2824           if (!is_near(L, bits)) return false;
2825           scratch = GetRtAsRegisterHelper(rt, scratch);
2826           DCHECK(!rs.is(scratch));
2827           offset = GetOffset(offset, L, bits);
2828           bgec(rs, scratch, offset);
2829         }
2830         break;
2831       case less:
2832         // rs < rt
2833         if (rs.code() == rt.rm_.reg_code) {
2834           break;  // No code needs to be emitted.
2835         } else if (rs.is(zero_reg)) {
2836           bits = OffsetSize::kOffset16;
2837           if (!is_near(L, bits)) return false;
2838           scratch = GetRtAsRegisterHelper(rt, scratch);
2839           offset = GetOffset(offset, L, bits);
2840           bgtzc(scratch, offset);
2841         } else if (IsZero(rt)) {
2842           bits = OffsetSize::kOffset16;
2843           if (!is_near(L, bits)) return false;
2844           offset = GetOffset(offset, L, bits);
2845           bltzc(rs, offset);
2846         } else {
2847           bits = OffsetSize::kOffset16;
2848           if (!is_near(L, bits)) return false;
2849           scratch = GetRtAsRegisterHelper(rt, scratch);
2850           DCHECK(!rs.is(scratch));
2851           offset = GetOffset(offset, L, bits);
2852           bltc(rs, scratch, offset);
2853         }
2854         break;
2855       case less_equal:
2856         // rs <= rt
2857         if (rs.code() == rt.rm_.reg_code) {
2858           bits = OffsetSize::kOffset26;
2859           if (!is_near(L, bits)) return false;
2860           offset = GetOffset(offset, L, bits);
2861           bc(offset);
2862         } else if (rs.is(zero_reg)) {
2863           bits = OffsetSize::kOffset16;
2864           if (!is_near(L, bits)) return false;
2865           scratch = GetRtAsRegisterHelper(rt, scratch);
2866           offset = GetOffset(offset, L, bits);
2867           bgezc(scratch, offset);
2868         } else if (IsZero(rt)) {
2869           bits = OffsetSize::kOffset16;
2870           if (!is_near(L, bits)) return false;
2871           offset = GetOffset(offset, L, bits);
2872           blezc(rs, offset);
2873         } else {
2874           bits = OffsetSize::kOffset16;
2875           if (!is_near(L, bits)) return false;
2876           scratch = GetRtAsRegisterHelper(rt, scratch);
2877           DCHECK(!rs.is(scratch));
2878           offset = GetOffset(offset, L, bits);
2879           bgec(scratch, rs, offset);
2880         }
2881         break;
2882 
2883       // Unsigned comparison.
2884       case Ugreater:
2885         // rs > rt
2886         if (rs.code() == rt.rm_.reg_code) {
2887           break;  // No code needs to be emitted.
2888         } else if (rs.is(zero_reg)) {
2889           bits = OffsetSize::kOffset21;
2890           if (!is_near(L, bits)) return false;
2891           scratch = GetRtAsRegisterHelper(rt, scratch);
2892           offset = GetOffset(offset, L, bits);
2893           bnezc(scratch, offset);
2894         } else if (IsZero(rt)) {
2895           bits = OffsetSize::kOffset21;
2896           if (!is_near(L, bits)) return false;
2897           offset = GetOffset(offset, L, bits);
2898           bnezc(rs, offset);
2899         } else {
2900           bits = OffsetSize::kOffset16;
2901           if (!is_near(L, bits)) return false;
2902           scratch = GetRtAsRegisterHelper(rt, scratch);
2903           DCHECK(!rs.is(scratch));
2904           offset = GetOffset(offset, L, bits);
2905           bltuc(scratch, rs, offset);
2906         }
2907         break;
2908       case Ugreater_equal:
2909         // rs >= rt
2910         if (rs.code() == rt.rm_.reg_code) {
2911           bits = OffsetSize::kOffset26;
2912           if (!is_near(L, bits)) return false;
2913           offset = GetOffset(offset, L, bits);
2914           bc(offset);
2915         } else if (rs.is(zero_reg)) {
2916           bits = OffsetSize::kOffset21;
2917           if (!is_near(L, bits)) return false;
2918           scratch = GetRtAsRegisterHelper(rt, scratch);
2919           offset = GetOffset(offset, L, bits);
2920           beqzc(scratch, offset);
2921         } else if (IsZero(rt)) {
2922           bits = OffsetSize::kOffset26;
2923           if (!is_near(L, bits)) return false;
2924           offset = GetOffset(offset, L, bits);
2925           bc(offset);
2926         } else {
2927           bits = OffsetSize::kOffset16;
2928           if (!is_near(L, bits)) return false;
2929           scratch = GetRtAsRegisterHelper(rt, scratch);
2930           DCHECK(!rs.is(scratch));
2931           offset = GetOffset(offset, L, bits);
2932           bgeuc(rs, scratch, offset);
2933         }
2934         break;
2935       case Uless:
2936         // rs < rt
2937         if (rs.code() == rt.rm_.reg_code) {
2938           break;  // No code needs to be emitted.
2939         } else if (rs.is(zero_reg)) {
2940           bits = OffsetSize::kOffset21;
2941           if (!is_near(L, bits)) return false;
2942           scratch = GetRtAsRegisterHelper(rt, scratch);
2943           offset = GetOffset(offset, L, bits);
2944           bnezc(scratch, offset);
2945         } else if (IsZero(rt)) {
2946           break;  // No code needs to be emitted.
2947         } else {
2948           bits = OffsetSize::kOffset16;
2949           if (!is_near(L, bits)) return false;
2950           scratch = GetRtAsRegisterHelper(rt, scratch);
2951           DCHECK(!rs.is(scratch));
2952           offset = GetOffset(offset, L, bits);
2953           bltuc(rs, scratch, offset);
2954         }
2955         break;
2956       case Uless_equal:
2957         // rs <= rt
2958         if (rs.code() == rt.rm_.reg_code) {
2959           bits = OffsetSize::kOffset26;
2960           if (!is_near(L, bits)) return false;
2961           offset = GetOffset(offset, L, bits);
2962           bc(offset);
2963         } else if (rs.is(zero_reg)) {
2964           bits = OffsetSize::kOffset26;
2965           if (!is_near(L, bits)) return false;
2966           scratch = GetRtAsRegisterHelper(rt, scratch);
2967           offset = GetOffset(offset, L, bits);
2968           bc(offset);
2969         } else if (IsZero(rt)) {
2970           bits = OffsetSize::kOffset21;
2971           if (!is_near(L, bits)) return false;
2972           offset = GetOffset(offset, L, bits);
2973           beqzc(rs, offset);
2974         } else {
2975           bits = OffsetSize::kOffset16;
2976           if (!is_near(L, bits)) return false;
2977           scratch = GetRtAsRegisterHelper(rt, scratch);
2978           DCHECK(!rs.is(scratch));
2979           offset = GetOffset(offset, L, bits);
2980           bgeuc(scratch, rs, offset);
2981         }
2982         break;
2983       default:
2984         UNREACHABLE();
2985     }
2986   }
2987   CheckTrampolinePoolQuick(1);
2988   return true;
2989 }
2990 
2991 
2992 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
2993                                        Register rs, const Operand& rt,
2994                                        BranchDelaySlot bdslot) {
2995   DCHECK(L == nullptr || offset == 0);
2996   if (!is_near(L, OffsetSize::kOffset16)) return false;
2997 
2998   Register scratch = at;
2999   int32_t offset32;
3000 
3001   // Be careful to always use shifted_branch_offset only just before the
3002   // branch instruction, as the location will be remembered for patching the
3003   // target.
3004   {
3005     BlockTrampolinePoolScope block_trampoline_pool(this);
3006     switch (cond) {
3007       case cc_always:
3008         offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3009         b(offset32);
3010         break;
3011       case eq:
3012         if (IsZero(rt)) {
3013           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3014           beq(rs, zero_reg, offset32);
3015         } else {
3016           // We don't want any other register but scratch clobbered.
3017           scratch = GetRtAsRegisterHelper(rt, scratch);
3018           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3019           beq(rs, scratch, offset32);
3020         }
3021         break;
3022       case ne:
3023         if (IsZero(rt)) {
3024           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3025           bne(rs, zero_reg, offset32);
3026         } else {
3027           // We don't want any other register but scratch clobbered.
3028           scratch = GetRtAsRegisterHelper(rt, scratch);
3029           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3030           bne(rs, scratch, offset32);
3031         }
3032         break;
3033 
3034       // Signed comparison.
3035       case greater:
3036         if (IsZero(rt)) {
3037           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3038           bgtz(rs, offset32);
3039         } else {
3040           Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3041           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3042           bne(scratch, zero_reg, offset32);
3043         }
3044         break;
3045       case greater_equal:
3046         if (IsZero(rt)) {
3047           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3048           bgez(rs, offset32);
3049         } else {
3050           Slt(scratch, rs, rt);
3051           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3052           beq(scratch, zero_reg, offset32);
3053         }
3054         break;
3055       case less:
3056         if (IsZero(rt)) {
3057           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3058           bltz(rs, offset32);
3059         } else {
3060           Slt(scratch, rs, rt);
3061           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3062           bne(scratch, zero_reg, offset32);
3063         }
3064         break;
3065       case less_equal:
3066         if (IsZero(rt)) {
3067           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3068           blez(rs, offset32);
3069         } else {
3070           Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3071           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3072           beq(scratch, zero_reg, offset32);
3073         }
3074         break;
3075 
3076       // Unsigned comparison.
3077       case Ugreater:
3078         if (IsZero(rt)) {
3079           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3080           bne(rs, zero_reg, offset32);
3081         } else {
3082           Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3083           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3084           bne(scratch, zero_reg, offset32);
3085         }
3086         break;
3087       case Ugreater_equal:
3088         if (IsZero(rt)) {
3089           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3090           b(offset32);
3091         } else {
3092           Sltu(scratch, rs, rt);
3093           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3094           beq(scratch, zero_reg, offset32);
3095         }
3096         break;
3097       case Uless:
3098         if (IsZero(rt)) {
3099           return true;  // No code needs to be emitted.
3100         } else {
3101           Sltu(scratch, rs, rt);
3102           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3103           bne(scratch, zero_reg, offset32);
3104         }
3105         break;
3106       case Uless_equal:
3107         if (IsZero(rt)) {
3108           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3109           beq(rs, zero_reg, offset32);
3110         } else {
3111           Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3112           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3113           beq(scratch, zero_reg, offset32);
3114         }
3115         break;
3116       default:
3117         UNREACHABLE();
3118     }
3119   }
3120   // Emit a nop in the branch delay slot if required.
3121   if (bdslot == PROTECT)
3122     nop();
3123 
3124   return true;
3125 }
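// Editor's note (not in the original source): pre-r6 MIPS has no fused
// compare-and-branch for relational conditions, so the helper above
// materializes the comparison with Slt/Sltu and branches on the scratch
// result, e.g. for `greater`:
//
//   Slt(scratch, rt, rs);            // scratch = (rt < rs) ? 1 : 0
//   bne(scratch, zero_reg, offset);  // taken iff rs > rt (signed)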
3126 
3127 
3128 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3129                                       Register rs, const Operand& rt,
3130                                       BranchDelaySlot bdslot) {
3131   BRANCH_ARGS_CHECK(cond, rs, rt);
3132   if (!L) {
3133     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3134       DCHECK(is_int26(offset));
3135       return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3136     } else {
3137       DCHECK(is_int16(offset));
3138       return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3139     }
3140   } else {
3141     DCHECK(offset == 0);
3142     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3143       return BranchShortHelperR6(0, L, cond, rs, rt);
3144     } else {
3145       return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3146     }
3147   }
3148   return false;
3149 }
3150 
3151 
3152 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3153                                  const Operand& rt, BranchDelaySlot bdslot) {
3154   BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3155 }
3156 
3157 
3158 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3159                                  const Operand& rt, BranchDelaySlot bdslot) {
3160   BranchShortCheck(0, L, cond, rs, rt, bdslot);
3161 }
3162 
3163 
3164 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3165   BranchAndLinkShort(offset, bdslot);
3166 }
3167 
3168 
3169 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3170                                    const Operand& rt, BranchDelaySlot bdslot) {
3171   bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3172   DCHECK(is_near);
3173   USE(is_near);
3174 }
3175 
3176 
3177 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3178   if (L->is_bound()) {
3179     if (is_near_branch(L)) {
3180       BranchAndLinkShort(L, bdslot);
3181     } else {
3182       BranchAndLinkLong(L, bdslot);
3183     }
3184   } else {
3185     if (is_trampoline_emitted()) {
3186       BranchAndLinkLong(L, bdslot);
3187     } else {
3188       BranchAndLinkShort(L, bdslot);
3189     }
3190   }
3191 }
3192 
3193 
3194 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3195                                    const Operand& rt,
3196                                    BranchDelaySlot bdslot) {
3197   if (L->is_bound()) {
3198     if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3199       Label skip;
3200       Condition neg_cond = NegateCondition(cond);
3201       BranchShort(&skip, neg_cond, rs, rt);
3202       BranchAndLinkLong(L, bdslot);
3203       bind(&skip);
3204     }
3205   } else {
3206     if (is_trampoline_emitted()) {
3207       Label skip;
3208       Condition neg_cond = NegateCondition(cond);
3209       BranchShort(&skip, neg_cond, rs, rt);
3210       BranchAndLinkLong(L, bdslot);
3211       bind(&skip);
3212     } else {
3213       BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3214     }
3215   }
3216 }
3217 
3218 
3219 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3220                                               BranchDelaySlot bdslot) {
3221   DCHECK(L == nullptr || offset == 0);
3222   offset = GetOffset(offset, L, OffsetSize::kOffset16);
3223   bal(offset);
3224 
3225   // Emit a nop in the branch delay slot if required.
3226   if (bdslot == PROTECT)
3227     nop();
3228 }
3229 
3230 
3231 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3232   DCHECK(L == nullptr || offset == 0);
3233   offset = GetOffset(offset, L, OffsetSize::kOffset26);
3234   balc(offset);
3235 }
3236 
3237 
3238 void MacroAssembler::BranchAndLinkShort(int32_t offset,
3239                                         BranchDelaySlot bdslot) {
3240   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3241     DCHECK(is_int26(offset));
3242     BranchAndLinkShortHelperR6(offset, nullptr);
3243   } else {
3244     DCHECK(is_int16(offset));
3245     BranchAndLinkShortHelper(offset, nullptr, bdslot);
3246   }
3247 }
3248 
3249 
3250 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3251   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3252     BranchAndLinkShortHelperR6(0, L);
3253   } else {
3254     BranchAndLinkShortHelper(0, L, bdslot);
3255   }
3256 }
3257 
3258 
3259 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3260                                                 Condition cond, Register rs,
3261                                                 const Operand& rt) {
3262   DCHECK(L == nullptr || offset == 0);
3263   Register scratch = rs.is(at) ? t8 : at;
3264   OffsetSize bits = OffsetSize::kOffset16;
3265 
3266   BlockTrampolinePoolScope block_trampoline_pool(this);
3267   DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3268   switch (cond) {
3269     case cc_always:
3270       bits = OffsetSize::kOffset26;
3271       if (!is_near(L, bits)) return false;
3272       offset = GetOffset(offset, L, bits);
3273       balc(offset);
3274       break;
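      // r6 compact branch-and-link instructions can only compare a single
      // register against zero, so the eq/ne cases below are emulated by
      // materializing rs - rt in the scratch register first, e.g.:
      //   subu    scratch, rs, rt
      //   beqzalc scratch, offset    // link-branch taken iff rs == rt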
3275     case eq:
3276       if (!is_near(L, bits)) return false;
3277       Subu(scratch, rs, rt);
3278       offset = GetOffset(offset, L, bits);
3279       beqzalc(scratch, offset);
3280       break;
3281     case ne:
3282       if (!is_near(L, bits)) return false;
3283       Subu(scratch, rs, rt);
3284       offset = GetOffset(offset, L, bits);
3285       bnezalc(scratch, offset);
3286       break;
3287 
3288     // Signed comparison.
3289     case greater:
3290       // rs > rt
3291       if (rs.code() == rt.rm_.reg_code) {
3292         break;  // No code needs to be emitted.
3293       } else if (rs.is(zero_reg)) {
3294         if (!is_near(L, bits)) return false;
3295         scratch = GetRtAsRegisterHelper(rt, scratch);
3296         offset = GetOffset(offset, L, bits);
3297         bltzalc(scratch, offset);
3298       } else if (IsZero(rt)) {
3299         if (!is_near(L, bits)) return false;
3300         offset = GetOffset(offset, L, bits);
3301         bgtzalc(rs, offset);
3302       } else {
3303         if (!is_near(L, bits)) return false;
3304         Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3305         offset = GetOffset(offset, L, bits);
3306         bnezalc(scratch, offset);
3307       }
3308       break;
3309     case greater_equal:
3310       // rs >= rt
3311       if (rs.code() == rt.rm_.reg_code) {
3312         bits = OffsetSize::kOffset26;
3313         if (!is_near(L, bits)) return false;
3314         offset = GetOffset(offset, L, bits);
3315         balc(offset);
3316       } else if (rs.is(zero_reg)) {
3317         if (!is_near(L, bits)) return false;
3318         scratch = GetRtAsRegisterHelper(rt, scratch);
3319         offset = GetOffset(offset, L, bits);
3320         blezalc(scratch, offset);
3321       } else if (IsZero(rt)) {
3322         if (!is_near(L, bits)) return false;
3323         offset = GetOffset(offset, L, bits);
3324         bgezalc(rs, offset);
3325       } else {
3326         if (!is_near(L, bits)) return false;
3327         Slt(scratch, rs, rt);
3328         offset = GetOffset(offset, L, bits);
3329         beqzalc(scratch, offset);
3330       }
3331       break;
3332     case less:
3333       // rs < rt
3334       if (rs.code() == rt.rm_.reg_code) {
3335         break;  // No code needs to be emitted.
3336       } else if (rs.is(zero_reg)) {
3337         if (!is_near(L, bits)) return false;
3338         scratch = GetRtAsRegisterHelper(rt, scratch);
3339         offset = GetOffset(offset, L, bits);
3340         bgtzalc(scratch, offset);
3341       } else if (IsZero(rt)) {
3342         if (!is_near(L, bits)) return false;
3343         offset = GetOffset(offset, L, bits);
3344         bltzalc(rs, offset);
3345       } else {
3346         if (!is_near(L, bits)) return false;
3347         Slt(scratch, rs, rt);
3348         offset = GetOffset(offset, L, bits);
3349         bnezalc(scratch, offset);
3350       }
3351       break;
3352     case less_equal:
3353       // rs <= rt
3354       if (rs.code() == rt.rm_.reg_code) {
3355         bits = OffsetSize::kOffset26;
3356         if (!is_near(L, bits)) return false;
3357         offset = GetOffset(offset, L, bits);
3358         balc(offset);
3359       } else if (rs.is(zero_reg)) {
3360         if (!is_near(L, bits)) return false;
3361         scratch = GetRtAsRegisterHelper(rt, scratch);
3362         offset = GetOffset(offset, L, bits);
3363         bgezalc(scratch, offset);
3364       } else if (IsZero(rt)) {
3365         if (!is_near(L, bits)) return false;
3366         offset = GetOffset(offset, L, bits);
3367         blezalc(rs, offset);
3368       } else {
3369         if (!is_near(L, bits)) return false;
3370         Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3371         offset = GetOffset(offset, L, bits);
3372         beqzalc(scratch, offset);
3373       }
3374       break;
3375 
3376 
3377     // Unsigned comparison.
3378     case Ugreater:
3379       // rs > rt
3380       if (!is_near(L, bits)) return false;
3381       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3382       offset = GetOffset(offset, L, bits);
3383       bnezalc(scratch, offset);
3384       break;
3385     case Ugreater_equal:
3386       // rs >= rt
3387       if (!is_near(L, bits)) return false;
3388       Sltu(scratch, rs, rt);
3389       offset = GetOffset(offset, L, bits);
3390       beqzalc(scratch, offset);
3391       break;
3392     case Uless:
3393       // rs < rt
3394       if (!is_near(L, bits)) return false;
3395       Sltu(scratch, rs, rt);
3396       offset = GetOffset(offset, L, bits);
3397       bnezalc(scratch, offset);
3398       break;
3399     case Uless_equal:
3400       // rs <= rt
3401       if (!is_near(L, bits)) return false;
3402       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3403       offset = GetOffset(offset, L, bits);
3404       beqzalc(scratch, offset);
3405       break;
3406     default:
3407       UNREACHABLE();
3408   }
3409   return true;
3410 }
3411 
3412 
3413 // Pre-r6 we need to use bgezal or bltzal, but they cannot consume the slt
3414 // result directly. We could use sub or add instead, but we would miss
3415 // overflow cases, so we keep slt and add an intermediate third instruction.
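// Illustrative expansion (not emitted verbatim) for the 'greater' case:
//   slt    scratch, rt, rs       // scratch = (rs > rt) ? 1 : 0
//   addiu  scratch, scratch, -1  // 0 if the condition holds, -1 otherwise
//   bgezal scratch, offset       // link-branch taken iff scratch == 0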
3416 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3417                                               Condition cond, Register rs,
3418                                               const Operand& rt,
3419                                               BranchDelaySlot bdslot) {
3420   DCHECK(L == nullptr || offset == 0);
3421   if (!is_near(L, OffsetSize::kOffset16)) return false;
3422 
3423   Register scratch = t8;
3424   BlockTrampolinePoolScope block_trampoline_pool(this);
3425 
3426   switch (cond) {
3427     case cc_always:
3428       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3429       bal(offset);
3430       break;
3431     case eq:
3432       bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3433       nop();
3434       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3435       bal(offset);
3436       break;
3437     case ne:
3438       beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3439       nop();
3440       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3441       bal(offset);
3442       break;
3443 
3444     // Signed comparison.
3445     case greater:
3446       Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3447       addiu(scratch, scratch, -1);
3448       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3449       bgezal(scratch, offset);
3450       break;
3451     case greater_equal:
3452       Slt(scratch, rs, rt);
3453       addiu(scratch, scratch, -1);
3454       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3455       bltzal(scratch, offset);
3456       break;
3457     case less:
3458       Slt(scratch, rs, rt);
3459       addiu(scratch, scratch, -1);
3460       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3461       bgezal(scratch, offset);
3462       break;
3463     case less_equal:
3464       Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3465       addiu(scratch, scratch, -1);
3466       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3467       bltzal(scratch, offset);
3468       break;
3469 
3470     // Unsigned comparison.
3471     case Ugreater:
3472       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3473       addiu(scratch, scratch, -1);
3474       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3475       bgezal(scratch, offset);
3476       break;
3477     case Ugreater_equal:
3478       Sltu(scratch, rs, rt);
3479       addiu(scratch, scratch, -1);
3480       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3481       bltzal(scratch, offset);
3482       break;
3483     case Uless:
3484       Sltu(scratch, rs, rt);
3485       addiu(scratch, scratch, -1);
3486       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3487       bgezal(scratch, offset);
3488       break;
3489     case Uless_equal:
3490       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3491       addiu(scratch, scratch, -1);
3492       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3493       bltzal(scratch, offset);
3494       break;
3495 
3496     default:
3497       UNREACHABLE();
3498   }
3499 
3500   // Emit a nop in the branch delay slot if required.
3501   if (bdslot == PROTECT)
3502     nop();
3503 
3504   return true;
3505 }
3506 
3507 
3508 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3509                                              Condition cond, Register rs,
3510                                              const Operand& rt,
3511                                              BranchDelaySlot bdslot) {
3512   BRANCH_ARGS_CHECK(cond, rs, rt);
3513 
3514   if (!L) {
3515     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3516       DCHECK(is_int26(offset));
3517       return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3518     } else {
3519       DCHECK(is_int16(offset));
3520       return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3521     }
3522   } else {
3523     DCHECK(offset == 0);
3524     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3525       return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3526     } else {
3527       return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3528     }
3529   }
3530   return false;
3531 }
3532 
3533 
3534 void MacroAssembler::Jump(Register target,
3535                           Condition cond,
3536                           Register rs,
3537                           const Operand& rt,
3538                           BranchDelaySlot bd) {
3539   BlockTrampolinePoolScope block_trampoline_pool(this);
3540   if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3541     if (cond == cc_always) {
3542       jic(target, 0);
3543     } else {
3544       BRANCH_ARGS_CHECK(cond, rs, rt);
3545       Branch(2, NegateCondition(cond), rs, rt);
3546       jic(target, 0);
3547     }
3548   } else {
3549     if (cond == cc_always) {
3550       jr(target);
3551     } else {
3552       BRANCH_ARGS_CHECK(cond, rs, rt);
3553       Branch(2, NegateCondition(cond), rs, rt);
3554       jr(target);
3555     }
3556     // Emit a nop in the branch delay slot if required.
3557     if (bd == PROTECT) nop();
3558   }
3559 }
3560 
3561 
3562 void MacroAssembler::Jump(intptr_t target,
3563                           RelocInfo::Mode rmode,
3564                           Condition cond,
3565                           Register rs,
3566                           const Operand& rt,
3567                           BranchDelaySlot bd) {
3568   Label skip;
3569   if (cond != cc_always) {
3570     Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3571   }
3572   // The first instruction of 'li' may be placed in the delay slot.
3573   // This is not an issue; t9 is expected to be clobbered anyway.
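  // Roughly, for a conditional jump the emitted sequence is:
  //   b<neg_cond> skip
  //   lui t9, hi(target)     // first half of li, lands in the delay slot
  //   ori t9, t9, lo(target)
  //   jr  t9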
3574   li(t9, Operand(target, rmode));
3575   Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3576   bind(&skip);
3577 }
3578 
3579 
3580 void MacroAssembler::Jump(Address target,
3581                           RelocInfo::Mode rmode,
3582                           Condition cond,
3583                           Register rs,
3584                           const Operand& rt,
3585                           BranchDelaySlot bd) {
3586   DCHECK(!RelocInfo::IsCodeTarget(rmode));
3587   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3588 }
3589 
3590 
3591 void MacroAssembler::Jump(Handle<Code> code,
3592                           RelocInfo::Mode rmode,
3593                           Condition cond,
3594                           Register rs,
3595                           const Operand& rt,
3596                           BranchDelaySlot bd) {
3597   DCHECK(RelocInfo::IsCodeTarget(rmode));
3598   AllowDeferredHandleDereference embedding_raw_address;
3599   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3600 }
3601 
3602 
3603 int MacroAssembler::CallSize(Register target,
3604                              Condition cond,
3605                              Register rs,
3606                              const Operand& rt,
3607                              BranchDelaySlot bd) {
3608   int size = 0;
3609 
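  // Accounting, mirrored by Call(Register, ...) below: one instruction for
  // an unconditional jump, three for the conditional form (branch, its
  // delay-slot nop, then the jump), plus one trailing nop when PROTECT is
  // requested on pre-r6 cores.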
3610   if (cond == cc_always) {
3611     size += 1;
3612   } else {
3613     size += 3;
3614   }
3615 
3616   if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
3617 
3618   return size * kInstrSize;
3619 }
3620 
3621 
3622 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
3623 void MacroAssembler::Call(Register target,
3624                           Condition cond,
3625                           Register rs,
3626                           const Operand& rt,
3627                           BranchDelaySlot bd) {
3628 #ifdef DEBUG
3629   int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3630 #endif
3631 
3632   BlockTrampolinePoolScope block_trampoline_pool(this);
3633   Label start;
3634   bind(&start);
3635   if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3636     if (cond == cc_always) {
3637       jialc(target, 0);
3638     } else {
3639       BRANCH_ARGS_CHECK(cond, rs, rt);
3640       Branch(2, NegateCondition(cond), rs, rt);
3641       jialc(target, 0);
3642     }
3643   } else {
3644     if (cond == cc_always) {
3645       jalr(target);
3646     } else {
3647       BRANCH_ARGS_CHECK(cond, rs, rt);
3648       Branch(2, NegateCondition(cond), rs, rt);
3649       jalr(target);
3650     }
3651     // Emit a nop in the branch delay slot if required.
3652     if (bd == PROTECT) nop();
3653   }
3654 
3655 #ifdef DEBUG
3656   CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3657            SizeOfCodeGeneratedSince(&start));
3658 #endif
3659 }
3660 
3661 
3662 int MacroAssembler::CallSize(Address target,
3663                              RelocInfo::Mode rmode,
3664                              Condition cond,
3665                              Register rs,
3666                              const Operand& rt,
3667                              BranchDelaySlot bd) {
3668   int size = CallSize(t9, cond, rs, rt, bd);
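  // The two extra instructions are the fixed-size li (lui + ori) that
  // Call(Address, ...) below uses to load the target address into t9.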
3669   return size + 2 * kInstrSize;
3670 }
3671 
3672 
3673 void MacroAssembler::Call(Address target,
3674                           RelocInfo::Mode rmode,
3675                           Condition cond,
3676                           Register rs,
3677                           const Operand& rt,
3678                           BranchDelaySlot bd) {
3679   BlockTrampolinePoolScope block_trampoline_pool(this);
3680   Label start;
3681   bind(&start);
3682   int32_t target_int = reinterpret_cast<int32_t>(target);
3683   li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3684   Call(t9, cond, rs, rt, bd);
3685   DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3686             SizeOfCodeGeneratedSince(&start));
3687 }
3688 
3689 
3690 int MacroAssembler::CallSize(Handle<Code> code,
3691                              RelocInfo::Mode rmode,
3692                              TypeFeedbackId ast_id,
3693                              Condition cond,
3694                              Register rs,
3695                              const Operand& rt,
3696                              BranchDelaySlot bd) {
3697   AllowDeferredHandleDereference using_raw_address;
3698   return CallSize(reinterpret_cast<Address>(code.location()),
3699       rmode, cond, rs, rt, bd);
3700 }
3701 
3702 
3703 void MacroAssembler::Call(Handle<Code> code,
3704                           RelocInfo::Mode rmode,
3705                           TypeFeedbackId ast_id,
3706                           Condition cond,
3707                           Register rs,
3708                           const Operand& rt,
3709                           BranchDelaySlot bd) {
3710   BlockTrampolinePoolScope block_trampoline_pool(this);
3711   Label start;
3712   bind(&start);
3713   DCHECK(RelocInfo::IsCodeTarget(rmode));
3714   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3715     SetRecordedAstId(ast_id);
3716     rmode = RelocInfo::CODE_TARGET_WITH_ID;
3717   }
3718   AllowDeferredHandleDereference embedding_raw_address;
3719   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3720   DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3721             SizeOfCodeGeneratedSince(&start));
3722 }
3723 
3724 
3725 void MacroAssembler::Ret(Condition cond,
3726                          Register rs,
3727                          const Operand& rt,
3728                          BranchDelaySlot bd) {
3729   Jump(ra, cond, rs, rt, bd);
3730 }
3731 
3732 
3733 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3734   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3735       (!L->is_bound() || is_near_r6(L))) {
3736     BranchShortHelperR6(0, L);
3737   } else {
3738     BlockTrampolinePoolScope block_trampoline_pool(this);
3739     uint32_t imm32;
3740     imm32 = jump_address(L);
3741     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3742       uint32_t lui_offset, jic_offset;
3743       UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
3744       {
3745         BlockGrowBufferScope block_buf_growth(this);
3746         // Buffer growth (and relocation) must be blocked for internal
3747         // references until associated instructions are emitted and
3748         // available to be patched.
3749         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3750         lui(at, lui_offset);
3751         jic(at, jic_offset);
3752       }
3753       CheckBuffer();
3754     } else {
3755       {
3756         BlockGrowBufferScope block_buf_growth(this);
3757         // Buffer growth (and relocation) must be blocked for internal
3758         // references
3759         // until associated instructions are emitted and available to be
3760         // patched.
3761         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3762         lui(at, (imm32 & kHiMask) >> kLuiShift);
3763         ori(at, at, (imm32 & kImm16Mask));
3764       }
3765       CheckBuffer();
3766       jr(at);
3767       // Emit a nop in the branch delay slot if required.
3768       if (bdslot == PROTECT) nop();
3769     }
3770   }
3771 }
3772 
3773 
3774 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3775   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3776       (!L->is_bound() || is_near_r6(L))) {
3777     BranchAndLinkShortHelperR6(0, L);
3778   } else {
3779     BlockTrampolinePoolScope block_trampoline_pool(this);
3780     uint32_t imm32;
3781     imm32 = jump_address(L);
3782     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3783       uint32_t lui_offset, jic_offset;
3784       UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
3785       {
3786         BlockGrowBufferScope block_buf_growth(this);
3787         // Buffer growth (and relocation) must be blocked for internal
3788         // references until associated instructions are emitted and
3789         // available to be patched.
3790         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3791         lui(at, lui_offset);
3792         jialc(at, jic_offset);
3793       }
3794       CheckBuffer();
3795     } else {
3796       {
3797         BlockGrowBufferScope block_buf_growth(this);
3798         // Buffer growth (and relocation) must be blocked for internal
3799         // references
3800         // until associated instructions are emitted and available to be
3801         // patched.
3802         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3803         lui(at, (imm32 & kHiMask) >> kLuiShift);
3804         ori(at, at, (imm32 & kImm16Mask));
3805       }
3806       CheckBuffer();
3807       jalr(at);
3808       // Emit a nop in the branch delay slot if required.
3809       if (bdslot == PROTECT) nop();
3810     }
3811   }
3812 }
3813 
3814 
3815 void MacroAssembler::DropAndRet(int drop) {
3816   DCHECK(is_int16(drop * kPointerSize));
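  // The stack adjustment below executes in the delay slot of the return
  // jump emitted by Ret(USE_DELAY_SLOT); roughly:
  //   jr    ra
  //   addiu sp, sp, drop * kPointerSize   // in the delay slot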
3817   Ret(USE_DELAY_SLOT);
3818   addiu(sp, sp, drop * kPointerSize);
3819 }
3820 
3821 void MacroAssembler::DropAndRet(int drop,
3822                                 Condition cond,
3823                                 Register r1,
3824                                 const Operand& r2) {
3825   // Both Drop and Ret need to be conditional.
3826   Label skip;
3827   if (cond != cc_always) {
3828     Branch(&skip, NegateCondition(cond), r1, r2);
3829   }
3830 
3831   Drop(drop);
3832   Ret();
3833 
3834   if (cond != cc_always) {
3835     bind(&skip);
3836   }
3837 }
3838 
3839 
3840 void MacroAssembler::Drop(int count,
3841                           Condition cond,
3842                           Register reg,
3843                           const Operand& op) {
3844   if (count <= 0) {
3845     return;
3846   }
3847 
3848   Label skip;
3849 
3850   if (cond != al) {
3851      Branch(&skip, NegateCondition(cond), reg, op);
3852   }
3853 
3854   Addu(sp, sp, Operand(count * kPointerSize));
3855 
3856   if (cond != al) {
3857     bind(&skip);
3858   }
3859 }
3860 
3861 
3862 
3863 void MacroAssembler::Swap(Register reg1,
3864                           Register reg2,
3865                           Register scratch) {
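  // Without a scratch register we fall back to the classic three-XOR swap.
  // Note that the XOR variant requires reg1 and reg2 to be distinct;
  // swapping a register with itself this way would zero it.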
3866   if (scratch.is(no_reg)) {
3867     Xor(reg1, reg1, Operand(reg2));
3868     Xor(reg2, reg2, Operand(reg1));
3869     Xor(reg1, reg1, Operand(reg2));
3870   } else {
3871     mov(scratch, reg1);
3872     mov(reg1, reg2);
3873     mov(reg2, scratch);
3874   }
3875 }
3876 
3877 
3878 void MacroAssembler::Call(Label* target) {
3879   BranchAndLink(target);
3880 }
3881 
3882 
3883 void MacroAssembler::Push(Handle<Object> handle) {
3884   li(at, Operand(handle));
3885   push(at);
3886 }
3887 
3888 void MacroAssembler::MaybeDropFrames() {
3889   // Check whether we need to drop frames to restart a function on the stack.
3890   ExternalReference restart_fp =
3891       ExternalReference::debug_restart_fp_address(isolate());
3892   li(a1, Operand(restart_fp));
3893   lw(a1, MemOperand(a1));
3894   Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
3895        ne, a1, Operand(zero_reg));
3896 }
3897 
3898 // ---------------------------------------------------------------------------
3899 // Exception handling.
3900 
3901 void MacroAssembler::PushStackHandler() {
3902   // Adjust this code if not the case.
3903   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3904   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
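  // A stack handler is a single word on the stack: the 'next' pointer
  // linking to the previous handler; the current top of the handler chain
  // lives at kHandlerAddress.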
3905 
3906   // Link the current handler as the next handler.
3907   li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3908   lw(t1, MemOperand(t2));
3909   push(t1);
3910 
3911   // Set this new handler as the current one.
3912   sw(sp, MemOperand(t2));
3913 }
3914 
3915 
3916 void MacroAssembler::PopStackHandler() {
3917   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3918   pop(a1);
3919   Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3920   li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3921   sw(a1, MemOperand(at));
3922 }
3923 
3924 
3925 void MacroAssembler::Allocate(int object_size,
3926                               Register result,
3927                               Register scratch1,
3928                               Register scratch2,
3929                               Label* gc_required,
3930                               AllocationFlags flags) {
3931   DCHECK(object_size <= kMaxRegularHeapObjectSize);
3932   DCHECK((flags & ALLOCATION_FOLDED) == 0);
3933   if (!FLAG_inline_new) {
3934     if (emit_debug_code()) {
3935       // Trash the registers to simulate an allocation failure.
3936       li(result, 0x7091);
3937       li(scratch1, 0x7191);
3938       li(scratch2, 0x7291);
3939     }
3940     jmp(gc_required);
3941     return;
3942   }
3943 
3944   DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
3945 
3946   // Make object size into bytes.
3947   if ((flags & SIZE_IN_WORDS) != 0) {
3948     object_size *= kPointerSize;
3949   }
3950   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3951 
3952   // Check relative positions of allocation top and limit addresses.
3953   // ARM adds additional checks to make sure the ldm instruction can be
3954   // used. MIPS has no ldm, so no additional checks are needed here.
3955   ExternalReference allocation_top =
3956       AllocationUtils::GetAllocationTopReference(isolate(), flags);
3957   ExternalReference allocation_limit =
3958       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3959 
3960   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
3961   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
3962   DCHECK((limit - top) == kPointerSize);
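  // Top and limit live in adjacent words, so a single base register can
  // reach both with small MemOperand offsets after one li.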
3963 
3964   // Set up allocation top address and allocation limit registers.
3965   Register top_address = scratch1;
3966   // This code stores a temporary value in t9.
3967   Register alloc_limit = t9;
3968   Register result_end = scratch2;
3969   li(top_address, Operand(allocation_top));
3970 
3971   if ((flags & RESULT_CONTAINS_TOP) == 0) {
3972     // Load allocation top into result and allocation limit into alloc_limit.
3973     lw(result, MemOperand(top_address));
3974     lw(alloc_limit, MemOperand(top_address, kPointerSize));
3975   } else {
3976     if (emit_debug_code()) {
3977       // Assert that result actually contains top on entry.
3978       lw(alloc_limit, MemOperand(top_address));
3979       Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
3980     }
3981     // Load allocation limit. Result already contains allocation top.
3982     lw(alloc_limit, MemOperand(top_address, limit - top));
3983   }
3984 
3985   if ((flags & DOUBLE_ALIGNMENT) != 0) {
3986     // Align the next allocation. Storing the filler map without checking top is
3987     // safe in new-space because the limit of the heap is aligned there.
3988     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
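    // If result is only word-aligned (result % 8 == 4), plug the odd word
    // with a one-pointer filler object and bump result to the next
    // double-aligned address.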
3989     And(result_end, result, Operand(kDoubleAlignmentMask));
3990     Label aligned;
3991     Branch(&aligned, eq, result_end, Operand(zero_reg));
3992     if ((flags & PRETENURE) != 0) {
3993       Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
3994     }
3995     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
3996     sw(result_end, MemOperand(result));
3997     Addu(result, result, Operand(kDoubleSize / 2));
3998     bind(&aligned);
3999   }
4000 
4001   // Calculate new top and bail out if new space is exhausted. Use result
4002   // to calculate the new top.
4003   Addu(result_end, result, Operand(object_size));
4004   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4005 
4006   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4007     // The top pointer is not updated for allocation folding dominators.
4008     sw(result_end, MemOperand(top_address));
4009   }
4010 
4011   // Tag object.
4012   Addu(result, result, Operand(kHeapObjectTag));
4013 }
4014 
4015 
4016 void MacroAssembler::Allocate(Register object_size, Register result,
4017                               Register result_end, Register scratch,
4018                               Label* gc_required, AllocationFlags flags) {
4019   DCHECK((flags & ALLOCATION_FOLDED) == 0);
4020   if (!FLAG_inline_new) {
4021     if (emit_debug_code()) {
4022       // Trash the registers to simulate an allocation failure.
4023       li(result, 0x7091);
4024       li(scratch, 0x7191);
4025       li(result_end, 0x7291);
4026     }
4027     jmp(gc_required);
4028     return;
4029   }
4030 
4031   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4032   // is not specified. Other registers must not overlap.
4033   DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4034   DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4035   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
4036 
4037   // Check relative positions of allocation top and limit addresses.
4038   // ARM adds additional checks to make sure the ldm instruction can be
4039   // used. MIPS has no ldm, so no additional checks are needed here.
4040   ExternalReference allocation_top =
4041       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4042   ExternalReference allocation_limit =
4043       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4044   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4045   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4046   DCHECK((limit - top) == kPointerSize);
4047 
4048   // Set up allocation top address and allocation limit registers.
4049   Register top_address = scratch;
4050   // This code stores a temporary value in t9.
4051   Register alloc_limit = t9;
4052   li(top_address, Operand(allocation_top));
4053 
4054   if ((flags & RESULT_CONTAINS_TOP) == 0) {
4055     // Load allocation top into result and allocation limit into alloc_limit.
4056     lw(result, MemOperand(top_address));
4057     lw(alloc_limit, MemOperand(top_address, kPointerSize));
4058   } else {
4059     if (emit_debug_code()) {
4060       // Assert that result actually contains top on entry.
4061       lw(alloc_limit, MemOperand(top_address));
4062       Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4063     }
4064     // Load allocation limit. Result already contains allocation top.
4065     lw(alloc_limit, MemOperand(top_address, limit - top));
4066   }
4067 
4068   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4069     // Align the next allocation. Storing the filler map without checking top is
4070     // safe in new-space because the limit of the heap is aligned there.
4071     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4072     And(result_end, result, Operand(kDoubleAlignmentMask));
4073     Label aligned;
4074     Branch(&aligned, eq, result_end, Operand(zero_reg));
4075     if ((flags & PRETENURE) != 0) {
4076       Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
4077     }
4078     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4079     sw(result_end, MemOperand(result));
4080     Addu(result, result, Operand(kDoubleSize / 2));
4081     bind(&aligned);
4082   }
4083 
4084   // Calculate new top and bail out if new space is exhausted. Use result
4085   // to calculate the new top. Object size may be in words so a shift is
4086   // required to get the number of bytes.
4087   if ((flags & SIZE_IN_WORDS) != 0) {
4088     Lsa(result_end, result, object_size, kPointerSizeLog2);
4089   } else {
4090     Addu(result_end, result, Operand(object_size));
4091   }
4092 
4093   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4094 
4095   // Update allocation top. result_end temporarily holds the new top.
4096   if (emit_debug_code()) {
4097     And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
4098     Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
4099   }
4100 
4101   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4102     // The top pointer is not updated for allocation folding dominators.
4103     sw(result_end, MemOperand(top_address));
4104   }
4105 
4106   // Tag object.
4107   Addu(result, result, Operand(kHeapObjectTag));
4108 }
4109 
4110 void MacroAssembler::FastAllocate(int object_size, Register result,
4111                                   Register scratch1, Register scratch2,
4112                                   AllocationFlags flags) {
4113   DCHECK(object_size <= kMaxRegularHeapObjectSize);
4114   DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4115 
4116   // Make object size into bytes.
4117   if ((flags & SIZE_IN_WORDS) != 0) {
4118     object_size *= kPointerSize;
4119   }
4120   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
4121 
4122   ExternalReference allocation_top =
4123       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4124 
4125   // Set up allocation top address and allocation limit registers.
4126   Register top_address = scratch1;
4127   // This code stores a temporary value in t9.
4128   Register result_end = scratch2;
4129   li(top_address, Operand(allocation_top));
4130   lw(result, MemOperand(top_address));
4131 
4132   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4133     // Align the next allocation. Storing the filler map without checking top is
4134     // safe in new-space because the limit of the heap is aligned there.
4135     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4136     And(result_end, result, Operand(kDoubleAlignmentMask));
4137     Label aligned;
4138     Branch(&aligned, eq, result_end, Operand(zero_reg));
4139     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4140     sw(result_end, MemOperand(result));
4141     Addu(result, result, Operand(kDoubleSize / 2));
4142     bind(&aligned);
4143   }
4144 
4145   Addu(result_end, result, Operand(object_size));
4146 
4147   // The top pointer is not updated for allocation folding dominators.
4148   sw(result_end, MemOperand(top_address));
4149 
4150   Addu(result, result, Operand(kHeapObjectTag));
4151 }
4152 
4153 void MacroAssembler::FastAllocate(Register object_size, Register result,
4154                                   Register result_end, Register scratch,
4155                                   AllocationFlags flags) {
4156   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4157   // is not specified. Other registers must not overlap.
4158   DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4159   DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4160   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
4161 
4162   ExternalReference allocation_top =
4163       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4164 
4165   // Set up allocation top address and allocation limit registers.
4166   Register top_address = scratch;
4167   // This code stores a temporary value in t9.
4168   li(top_address, Operand(allocation_top));
4169   lw(result, MemOperand(top_address));
4170 
4171   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4172     // Align the next allocation. Storing the filler map without checking top is
4173     // safe in new-space because the limit of the heap is aligned there.
4174     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4175     And(result_end, result, Operand(kDoubleAlignmentMask));
4176     Label aligned;
4177     Branch(&aligned, eq, result_end, Operand(zero_reg));
4178     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4179     sw(result_end, MemOperand(result));
4180     Addu(result, result, Operand(kDoubleSize / 2));
4181     bind(&aligned);
4182   }
4183 
4184   // Calculate new top and bail out if new space is exhausted. Use result
4185   // to calculate the new top. Object size may be in words so a shift is
4186   // required to get the number of bytes.
4187   if ((flags & SIZE_IN_WORDS) != 0) {
4188     Lsa(result_end, result, object_size, kPointerSizeLog2);
4189   } else {
4190     Addu(result_end, result, Operand(object_size));
4191   }
4192 
4193   // The top pointer is not updated for allocation folding dominators.
4194   sw(result_end, MemOperand(top_address));
4195 
4196   Addu(result, result, Operand(kHeapObjectTag));
4197 }
4198 
4199 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4200                                                      Label* not_unique_name) {
4201   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
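  // Internalized strings have both the string and the internalized tag
  // bits clear in their instance type; the only other unique names are
  // symbols, checked for below.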
4202   Label succeed;
4203   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4204   Branch(&succeed, eq, at, Operand(zero_reg));
4205   Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4206 
4207   bind(&succeed);
4208 }
4209 
4210 
4211 // Allocates a heap number or jumps to the label if the young space is full and
4212 // a scavenge is needed.
4213 void MacroAssembler::AllocateHeapNumber(Register result,
4214                                         Register scratch1,
4215                                         Register scratch2,
4216                                         Register heap_number_map,
4217                                         Label* need_gc,
4218                                         MutableMode mode) {
4219   // Allocate an object in the heap for the heap number and tag it as a heap
4220   // object.
4221   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4222            NO_ALLOCATION_FLAGS);
4223 
4224   Heap::RootListIndex map_index = mode == MUTABLE
4225       ? Heap::kMutableHeapNumberMapRootIndex
4226       : Heap::kHeapNumberMapRootIndex;
4227   AssertIsRoot(heap_number_map, map_index);
4228 
4229   // Store heap number map in the allocated object.
4230   sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4231 }
4232 
4233 
4234 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4235                                                  FPURegister value,
4236                                                  Register scratch1,
4237                                                  Register scratch2,
4238                                                  Label* gc_required) {
4239   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4240   AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4241   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4242 }
4243 
4244 
4245 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4246                                      Register value, Register scratch1,
4247                                      Register scratch2, Label* gc_required) {
4248   DCHECK(!result.is(constructor));
4249   DCHECK(!result.is(scratch1));
4250   DCHECK(!result.is(scratch2));
4251   DCHECK(!result.is(value));
4252 
4253   // Allocate JSValue in new space.
4254   Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4255            NO_ALLOCATION_FLAGS);
4256 
4257   // Initialize the JSValue.
4258   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4259   sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4260   LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4261   sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4262   sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4263   sw(value, FieldMemOperand(result, JSValue::kValueOffset));
4264   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4265 }
4266 
4267 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4268                                                 Register end_address,
4269                                                 Register filler) {
4270   Label loop, entry;
4271   Branch(&entry);
4272   bind(&loop);
4273   sw(filler, MemOperand(current_address));
4274   Addu(current_address, current_address, kPointerSize);
4275   bind(&entry);
4276   Branch(&loop, ult, current_address, Operand(end_address));
4277 }
4278 
4279 void MacroAssembler::CompareMapAndBranch(Register obj,
4280                                          Register scratch,
4281                                          Handle<Map> map,
4282                                          Label* early_success,
4283                                          Condition cond,
4284                                          Label* branch_to) {
4285   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4286   CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4287 }
4288 
4289 
4290 void MacroAssembler::CompareMapAndBranch(Register obj_map,
4291                                          Handle<Map> map,
4292                                          Label* early_success,
4293                                          Condition cond,
4294                                          Label* branch_to) {
4295   Branch(branch_to, cond, obj_map, Operand(map));
4296 }
4297 
4298 
4299 void MacroAssembler::CheckMap(Register obj,
4300                               Register scratch,
4301                               Handle<Map> map,
4302                               Label* fail,
4303                               SmiCheckType smi_check_type) {
4304   if (smi_check_type == DO_SMI_CHECK) {
4305     JumpIfSmi(obj, fail);
4306   }
4307   Label success;
4308   CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4309   bind(&success);
4310 }
4311 
4312 
4313 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4314                                      Register scratch2, Handle<WeakCell> cell,
4315                                      Handle<Code> success,
4316                                      SmiCheckType smi_check_type) {
4317   Label fail;
4318   if (smi_check_type == DO_SMI_CHECK) {
4319     JumpIfSmi(obj, &fail);
4320   }
4321   lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4322   GetWeakValue(scratch2, cell);
4323   Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4324   bind(&fail);
4325 }
4326 
4327 
4328 void MacroAssembler::CheckMap(Register obj,
4329                               Register scratch,
4330                               Heap::RootListIndex index,
4331                               Label* fail,
4332                               SmiCheckType smi_check_type) {
4333   if (smi_check_type == DO_SMI_CHECK) {
4334     JumpIfSmi(obj, fail);
4335   }
4336   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4337   LoadRoot(at, index);
4338   Branch(fail, ne, scratch, Operand(at));
4339 }
4340 
4341 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4342                                         const DoubleRegister src) {
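  // FPU arithmetic replaces a NaN input with the canonical (default) NaN,
  // so subtracting zero canonicalizes src while leaving every other value,
  // including -0.0, unchanged.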
4343   sub_d(dst, src, kDoubleRegZero);
4344 }
4345 
4346 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4347   li(value, Operand(cell));
4348   lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
4349 }
4350 
4351 
4352 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4353                                    Label* miss) {
4354   GetWeakValue(value, cell);
4355   JumpIfSmi(value, miss);
4356 }
4357 
4358 
4359 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
4360   if (IsMipsSoftFloatABI) {
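    // The soft-float o32 ABI returns doubles in the v0/v1 GPR pair; which
    // register holds the low word depends on the target endianness.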
4361     if (kArchEndian == kLittle) {
4362       Move(dst, v0, v1);
4363     } else {
4364       Move(dst, v1, v0);
4365     }
4366   } else {
4367     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
4368   }
4369 }
4370 
4371 
4372 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
4373   if (IsMipsSoftFloatABI) {
4374     if (kArchEndian == kLittle) {
4375       Move(dst, a0, a1);
4376     } else {
4377       Move(dst, a1, a0);
4378     }
4379   } else {
4380     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
4381   }
4382 }
4383 
4384 
4385 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4386   if (!IsMipsSoftFloatABI) {
4387     Move(f12, src);
4388   } else {
4389     if (kArchEndian == kLittle) {
4390       Move(a0, a1, src);
4391     } else {
4392       Move(a1, a0, src);
4393     }
4394   }
4395 }
4396 
4397 
4398 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4399   if (!IsMipsSoftFloatABI) {
4400     Move(f0, src);
4401   } else {
4402     if (kArchEndian == kLittle) {
4403       Move(v0, v1, src);
4404     } else {
4405       Move(v1, v0, src);
4406     }
4407   }
4408 }
4409 
4410 
4411 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4412                                           DoubleRegister src2) {
4413   if (!IsMipsSoftFloatABI) {
4414     if (src2.is(f12)) {
4415       DCHECK(!src1.is(f14));
4416       Move(f14, src2);
4417       Move(f12, src1);
4418     } else {
4419       Move(f12, src1);
4420       Move(f14, src2);
4421     }
4422   } else {
4423     if (kArchEndian == kLittle) {
4424       Move(a0, a1, src1);
4425       Move(a2, a3, src2);
4426     } else {
4427       Move(a1, a0, src1);
4428       Move(a3, a2, src2);
4429     }
4430   }
4431 }
4432 
4433 
4434 // -----------------------------------------------------------------------------
4435 // JavaScript invokes.
4436 
4437 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4438                                         Register caller_args_count_reg,
4439                                         Register scratch0, Register scratch1) {
4440 #ifdef DEBUG
4441   if (callee_args_count.is_reg()) {
4442     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4443                        scratch1));
4444   } else {
4445     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4446   }
4447 #endif
4448 
4449   // Calculate the end of the destination area where we will put the
4450   // arguments after we drop the current frame. We add kPointerSize to count
4451   // the receiver argument, which is not included in the formal parameter count.
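  // (Sketch, assuming the standard JS frame layout: dst_reg ends up at
  // caller_sp + (caller_args + 1) * kPointerSize, one word past the
  // caller's receiver slot.)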
4452   Register dst_reg = scratch0;
4453   Lsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4454   Addu(dst_reg, dst_reg,
4455        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4456 
4457   Register src_reg = caller_args_count_reg;
4458   // Calculate the end of source area. +kPointerSize is for the receiver.
4459   if (callee_args_count.is_reg()) {
4460     Lsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4461     Addu(src_reg, src_reg, Operand(kPointerSize));
4462   } else {
4463     Addu(src_reg, sp,
4464          Operand((callee_args_count.immediate() + 1) * kPointerSize));
4465   }
4466 
4467   if (FLAG_debug_code) {
4468     Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
4469   }
4470 
4471   // Restore caller's frame pointer and return address now as they will be
4472   // overwritten by the copying loop.
4473   lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4474   lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4475 
4476   // Now copy the callee arguments to the caller frame, going backwards to
4477   // avoid corrupting them (the source and destination areas may overlap).
4478 
4479   // Both src_reg and dst_reg are pointing to the word after the one to copy,
4480   // so they must be pre-decremented in the loop.
4481   Register tmp_reg = scratch1;
4482   Label loop, entry;
4483   Branch(&entry);
4484   bind(&loop);
4485   Subu(src_reg, src_reg, Operand(kPointerSize));
4486   Subu(dst_reg, dst_reg, Operand(kPointerSize));
4487   lw(tmp_reg, MemOperand(src_reg));
4488   sw(tmp_reg, MemOperand(dst_reg));
4489   bind(&entry);
4490   Branch(&loop, ne, sp, Operand(src_reg));
4491 
4492   // Leave current frame.
4493   mov(sp, dst_reg);
4494 }
4495 
4496 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4497                                     const ParameterCount& actual,
4498                                     Label* done,
4499                                     bool* definitely_mismatches,
4500                                     InvokeFlag flag,
4501                                     const CallWrapper& call_wrapper) {
4502   bool definitely_matches = false;
4503   *definitely_mismatches = false;
4504   Label regular_invoke;
4505 
4506   // Check whether the expected and actual arguments count match. If not,
4507   // setup registers according to contract with ArgumentsAdaptorTrampoline:
4508   //  a0: actual arguments count
4509   //  a1: function (passed through to callee)
4510   //  a2: expected arguments count
4511 
4512   // The code below is made a lot easier because the calling code already sets
4513   // up actual and expected registers according to the contract if values are
4514   // passed in registers.
4515   DCHECK(actual.is_immediate() || actual.reg().is(a0));
4516   DCHECK(expected.is_immediate() || expected.reg().is(a2));
4517 
4518   if (expected.is_immediate()) {
4519     DCHECK(actual.is_immediate());
4520     li(a0, Operand(actual.immediate()));
4521     if (expected.immediate() == actual.immediate()) {
4522       definitely_matches = true;
4523     } else {
4524       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4525       if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        li(a2, Operand(expected.immediate()));
      }
    }
  } else if (actual.is_immediate()) {
    li(a0, Operand(actual.immediate()));
    Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
  } else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        Branch(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  li(t0, Operand(debug_hook_active));
  lb(t0, MemOperand(t0));
  Branch(&skip_hook, eq, t0, Operand(zero_reg));
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
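    // Push the function twice: the first copy is preserved across the runtime
    // call and restored by the Pop below; the second is consumed as the
    // argument to Runtime::kDebugOnFunctionCall.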
    Push(fun);
    Push(fun);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_hook);
}


void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(a1));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));

  if (call_wrapper.NeedsDebugHookCheck()) {
    CheckDebugHook(function, new_target, expected, actual);
  }

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(a3, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = t0;
    lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }
    // Continue here if InvokePrologue handled the invocation itself because
    // of mismatched parameter counts.
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register function,
                                    Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in a1.
  DCHECK(function.is(a1));
  Register expected_reg = a2;
  Register temp_reg = t0;

  lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  lw(expected_reg,
     FieldMemOperand(temp_reg,
                     SharedFunctionInfo::kFormalParameterCountOffset));
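  // The formal parameter count is stored as a Smi; untag it.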
  sra(expected_reg, expected_reg, kSmiTagSize);

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(function, new_target, expected, actual, flag,
                     call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in a1.
  DCHECK(function.is(a1));

  // Get the function and setup the context.
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  li(a1, function);
  InvokeFunction(a1, expected, actual, flag, call_wrapper);
}


void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(scratch, scratch, Operand(kIsNotStringMask));
  Branch(fail, ne, scratch, Operand(zero_reg));
}


void MacroAssembler::IsObjectNameType(Register object,
                                      Register scratch,
                                      Label* fail) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
}


// ---------------------------------------------------------------------------
// Support functions.


void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp, Register temp2) {
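  // Follow the back-pointer chain in the constructor-or-back-pointer field
  // until something that is not a map (the constructor, or a Smi) is found.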
  Label done, loop;
  lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
  bind(&loop);
  JumpIfSmi(result, &done);
  GetObjectType(result, temp, temp2);
  Branch(&done, ne, temp2, Operand(MAP_TYPE));
  lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
  Branch(&loop);
  bind(&done);
}

void MacroAssembler::GetObjectType(Register object,
                                   Register map,
                                   Register type_reg) {
  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}


// -----------------------------------------------------------------------------
// Runtime calls.

void MacroAssembler::CallStub(CodeStub* stub,
                              TypeFeedbackId ast_id,
                              Condition cond,
                              Register r1,
                              const Operand& r2,
                              BranchDelaySlot bd) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
       cond, r1, r2, bd);
}


void MacroAssembler::TailCallStub(CodeStub* stub,
                                  Condition cond,
                                  Register r1,
                                  const Operand& r2,
                                  BranchDelaySlot bd) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               FPURegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    JumpIfNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    sra(scratch1, object, kSmiTagSize);
    mtc1(scratch1, result);
    cvt_d_w(result, result);
    Branch(&done);
    bind(&not_smi);
  }
  // Check for heap number and load double value from it.
  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  Branch(not_number, ne, scratch1, Operand(heap_number_map));

  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either a NaN or +/-Infinity.
    Register exponent = scratch1;
    Register mask_reg = scratch2;
    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
    li(mask_reg, HeapNumber::kExponentMask);

    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));
  }
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}


void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            FPURegister value,
                                            Register scratch1) {
  sra(scratch1, smi, kSmiTagSize);
  mtc1(scratch1, value);
  cvt_d_w(value, value);
}


static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
                                   Label* overflow_label,
                                   Label* no_overflow_label) {
  DCHECK(overflow_label || no_overflow_label);
  if (!overflow_label) {
    DCHECK(no_overflow_label);
    masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
  } else {
    masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
    if (no_overflow_label) masm->Branch(no_overflow_label);
  }
}


void MacroAssembler::AddBranchOvf(Register dst, Register left,
                                  const Operand& right, Label* overflow_label,
                                  Label* no_overflow_label, Register scratch) {
  if (right.is_reg()) {
    AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
                 scratch);
  } else {
    if (IsMipsArchVariant(kMips32r6)) {
      Register right_reg = t9;
      DCHECK(!left.is(right_reg));
      li(right_reg, Operand(right));
      AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
    } else {
      Register overflow_dst = t9;
      DCHECK(!dst.is(scratch));
      DCHECK(!dst.is(overflow_dst));
      DCHECK(!scratch.is(overflow_dst));
      DCHECK(!left.is(overflow_dst));
      if (dst.is(left)) {
        mov(scratch, left);                  // Preserve left.
        Addu(dst, left, right.immediate());  // Left is overwritten.
        xor_(scratch, dst, scratch);         // Original left.
        // Load right since xori takes uint16 as immediate.
        Addu(overflow_dst, zero_reg, right);
        xor_(overflow_dst, dst, overflow_dst);
        and_(overflow_dst, overflow_dst, scratch);
      } else {
        Addu(dst, left, right.immediate());
        xor_(overflow_dst, dst, left);
        // Load right since xori takes uint16 as immediate.
        Addu(scratch, zero_reg, right);
        xor_(scratch, dst, scratch);
        and_(overflow_dst, scratch, overflow_dst);
      }
      BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
    }
  }
}


void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
                                  Label* overflow_label,
                                  Label* no_overflow_label, Register scratch) {
  if (IsMipsArchVariant(kMips32r6)) {
    if (!overflow_label) {
      DCHECK(no_overflow_label);
      DCHECK(!dst.is(scratch));
      Register left_reg = left.is(dst) ? scratch : left;
      Register right_reg = right.is(dst) ? t9 : right;
      DCHECK(!dst.is(left_reg));
      DCHECK(!dst.is(right_reg));
      Move(left_reg, left);
      Move(right_reg, right);
      addu(dst, left, right);
      Bnvc(left_reg, right_reg, no_overflow_label);
    } else {
      Bovc(left, right, overflow_label);
      addu(dst, left, right);
      if (no_overflow_label) bc(no_overflow_label);
    }
  } else {
    Register overflow_dst = t9;
    DCHECK(!dst.is(scratch));
    DCHECK(!dst.is(overflow_dst));
    DCHECK(!scratch.is(overflow_dst));
    DCHECK(!left.is(overflow_dst));
    DCHECK(!right.is(overflow_dst));
    DCHECK(!left.is(scratch));
    DCHECK(!right.is(scratch));

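    // Signed addition overflows iff both operands have the same sign and the
    // sign of the result differs: in that case both (dst ^ left) and
    // (dst ^ right) have the sign bit set, so their AND is negative.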
    if (left.is(right) && dst.is(left)) {
      mov(overflow_dst, right);
      right = overflow_dst;
    }

    if (dst.is(left)) {
      mov(scratch, left);           // Preserve left.
      addu(dst, left, right);       // Left is overwritten.
      xor_(scratch, dst, scratch);  // Original left.
      xor_(overflow_dst, dst, right);
      and_(overflow_dst, overflow_dst, scratch);
    } else if (dst.is(right)) {
      mov(scratch, right);          // Preserve right.
      addu(dst, left, right);       // Right is overwritten.
      xor_(scratch, dst, scratch);  // Original right.
      xor_(overflow_dst, dst, left);
      and_(overflow_dst, overflow_dst, scratch);
    } else {
      addu(dst, left, right);
      xor_(overflow_dst, dst, left);
      xor_(scratch, dst, right);
      and_(overflow_dst, scratch, overflow_dst);
    }
    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
  }
}


void MacroAssembler::SubBranchOvf(Register dst, Register left,
                                  const Operand& right, Label* overflow_label,
                                  Label* no_overflow_label, Register scratch) {
  DCHECK(overflow_label || no_overflow_label);
  if (right.is_reg()) {
    SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
                 scratch);
  } else {
    Register overflow_dst = t9;
    DCHECK(!dst.is(scratch));
    DCHECK(!dst.is(overflow_dst));
    DCHECK(!scratch.is(overflow_dst));
    DCHECK(!left.is(overflow_dst));
    DCHECK(!left.is(scratch));
    if (dst.is(left)) {
      mov(scratch, left);                      // Preserve left.
      Subu(dst, left, right.immediate());      // Left is overwritten.
      // Load right since xori takes uint16 as immediate.
      Addu(overflow_dst, zero_reg, right);
      xor_(overflow_dst, scratch, overflow_dst);  // scratch is original left.
      xor_(scratch, dst, scratch);                // scratch is original left.
      and_(overflow_dst, scratch, overflow_dst);
    } else {
      Subu(dst, left, right);
      xor_(overflow_dst, dst, left);
      // Load right since xori takes uint16 as immediate.
      Addu(scratch, zero_reg, right);
      xor_(scratch, left, scratch);
      and_(overflow_dst, scratch, overflow_dst);
    }
    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
  }
}


void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
                                  Label* overflow_label,
                                  Label* no_overflow_label, Register scratch) {
  DCHECK(overflow_label || no_overflow_label);
  Register overflow_dst = t9;
  DCHECK(!dst.is(scratch));
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!scratch.is(overflow_dst));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));
  DCHECK(!scratch.is(left));
  DCHECK(!scratch.is(right));

  // This happens with some crankshaft code. Since Subu works fine if
  // left == right, let's not make that restriction here.
  if (left.is(right)) {
    mov(dst, zero_reg);
    if (no_overflow_label) {
      Branch(no_overflow_label);
    }
  }

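  // Signed subtraction overflows iff the operands have different signs and
  // the sign of the result differs from the sign of left: both
  // (left ^ right) and (dst ^ left) then have the sign bit set.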
  if (dst.is(left)) {
    mov(scratch, left);  // Preserve left.
    subu(dst, left, right);  // Left is overwritten.
    xor_(overflow_dst, dst, scratch);  // scratch is original left.
    xor_(scratch, scratch, right);  // scratch is original left.
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);  // Preserve right.
    subu(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);  // Original right.
    and_(overflow_dst, scratch, overflow_dst);
  } else {
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}

static inline void BranchOvfHelperMult(MacroAssembler* masm,
                                       Register overflow_dst,
                                       Label* overflow_label,
                                       Label* no_overflow_label) {
  DCHECK(overflow_label || no_overflow_label);
  if (!overflow_label) {
    DCHECK(no_overflow_label);
    masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
  } else {
    masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
    if (no_overflow_label) masm->Branch(no_overflow_label);
  }
}

void MacroAssembler::MulBranchOvf(Register dst, Register left,
                                  const Operand& right, Label* overflow_label,
                                  Label* no_overflow_label, Register scratch) {
  DCHECK(overflow_label || no_overflow_label);
  if (right.is_reg()) {
    MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
                 scratch);
  } else {
    Register overflow_dst = t9;
    DCHECK(!dst.is(scratch));
    DCHECK(!dst.is(overflow_dst));
    DCHECK(!scratch.is(overflow_dst));
    DCHECK(!left.is(overflow_dst));
    DCHECK(!left.is(scratch));

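    // Mul computes the full 64-bit product: the high word goes to
    // overflow_dst and the low word to dst. The product fits in 32 signed
    // bits iff the high word equals the sign extension of the low word.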
    Mul(overflow_dst, dst, left, right.immediate());
    sra(scratch, dst, 31);
    xor_(overflow_dst, overflow_dst, scratch);

    BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
  }
}

void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
                                  Label* overflow_label,
                                  Label* no_overflow_label, Register scratch) {
  DCHECK(overflow_label || no_overflow_label);
  Register overflow_dst = t9;
  DCHECK(!dst.is(scratch));
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!scratch.is(overflow_dst));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));
  DCHECK(!scratch.is(left));
  DCHECK(!scratch.is(right));

  if (IsMipsArchVariant(kMips32r6) && dst.is(right)) {
    mov(scratch, right);
    Mul(overflow_dst, dst, left, scratch);
    sra(scratch, dst, 31);
    xor_(overflow_dst, overflow_dst, scratch);
  } else {
    Mul(overflow_dst, dst, left, right);
    sra(scratch, dst, 31);
    xor_(overflow_dst, overflow_dst, scratch);
  }

  BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles,
                                 BranchDelaySlot bd) {
  // All parameters are on the stack. v0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  PrepareCEntryArgs(num_arguments);
  PrepareCEntryFunction(ExternalReference(f, isolate()));
  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments,
                                           BranchDelaySlot bd) {
  PrepareCEntryArgs(num_arguments);
  PrepareCEntryFunction(ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    PrepareCEntryArgs(function->nargs);
  }
  JumpToExternalReference(ExternalReference(fid, isolate()));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             BranchDelaySlot bd,
                                             bool builtin_exit_frame) {
  PrepareCEntryFunction(builtin);
  CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                  builtin_exit_frame);
  Jump(stub.GetCode(),
       RelocInfo::CODE_TARGET,
       al,
       zero_reg,
       Operand(zero_reg),
       bd);
}

void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch1, Operand(value));
    li(scratch2, Operand(ExternalReference(counter)));
    sw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Addu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Subu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}


// -----------------------------------------------------------------------------
// Debugging.

void MacroAssembler::Assert(Condition cc, BailoutReason reason,
                            Register rs, Operand rt) {
  if (emit_debug_code())
    Check(cc, reason, rs, rt);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    DCHECK(!elements.is(at));
    Label ok;
    push(elements);
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
    pop(elements);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason,
                           Register rs, Operand rt) {
  Label L;
  Branch(&L, cc, rs, rt);
  Abort(reason);
  // Will not return here.
  bind(&L);
}


void MacroAssembler::Abort(BailoutReason reason) {
  Label abort_start;
  bind(&abort_start);
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    stop(msg);
    return;
  }
#endif

  // Check if Abort() has already been initialized.
  DCHECK(isolate()->builtins()->Abort()->IsHeapObject());

  Move(a0, Smi::FromInt(static_cast<int>(reason)));

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
  } else {
    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
  }
  // Will not return here.
  if (is_trampoline_pool_blocked()) {
    // If the calling code cares about the exact number of
    // instructions generated, we insert padding here to keep the size
    // of the Abort macro constant.
    // Currently in debug mode with debug_code enabled the number of
    // generated instructions is 10, so we use this as a maximum value.
    static const int kExpectedAbortInstructions = 10;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    DCHECK(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context register cp).
    Move(dst, cp);
  }
}

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  lw(dst, NativeContextMemOperand());
  lw(dst, ContextMemOperand(dst, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

void MacroAssembler::StubPrologue(StackFrame::Type type) {
  li(at, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(at);
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictible_code_size_scope(
      this, kNoCodeAgeSequenceLength);
  // The following three instructions must remain together and unmodified
  // for code aging to work properly.
  if (code_pre_aging) {
    // Pre-age the code.
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address to t9 and call it,
    // GetCodeAge() extracts the stub address from this instruction.
    li(t9,
       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
       CONSTANT_SIZE);
    nop();  // Prevent jalr to jal optimization.
    jalr(t9, a0);
    nop();  // Branch delay slot nop.
    nop();  // Pad the empty space.
  } else {
    PushStandardFrame(a1);
    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
  }
}

void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
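  // Load the function from the frame, then its feedback vector cell, then
  // the vector itself out of the cell.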
  lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  lw(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
  lw(vector, FieldMemOperand(vector, Cell::kValueOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on mips.
  UNREACHABLE();
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  int stack_offset, fp_offset;
  if (type == StackFrame::INTERNAL) {
    stack_offset = -4 * kPointerSize;
    fp_offset = 2 * kPointerSize;
  } else {
    stack_offset = -3 * kPointerSize;
    fp_offset = 1 * kPointerSize;
  }
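  // Reserve the frame slots, then store ra, fp and the frame-type marker
  // (plus the code object for INTERNAL frames) from the top of the
  // reservation downwards.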
  addiu(sp, sp, stack_offset);
  stack_offset = -stack_offset - kPointerSize;
  sw(ra, MemOperand(sp, stack_offset));
  stack_offset -= kPointerSize;
  sw(fp, MemOperand(sp, stack_offset));
  stack_offset -= kPointerSize;
  li(t9, Operand(StackFrame::TypeToMarker(type)));
  sw(t9, MemOperand(sp, stack_offset));
  if (type == StackFrame::INTERNAL) {
    DCHECK_EQ(stack_offset, kPointerSize);
    li(t9, Operand(CodeObject()));
    sw(t9, MemOperand(sp, 0));
  } else {
    DCHECK_EQ(stack_offset, 0);
  }
  // Adjust FP to point to saved FP.
  Addu(fp, sp, Operand(fp_offset));
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
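  // Restore sp to the caller's value (just above the saved ra), then reload
  // ra and fp from the frame.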
  addiu(sp, fp, 2 * kPointerSize);
  lw(ra, MemOperand(fp, 1 * kPointerSize));
  lw(fp, MemOperand(fp, 0 * kPointerSize));
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Push(ra, fp);
  Move(fp, sp);
  Push(context, target, argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(context, target, argc);
  Pop(ra, fp);
}

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);

  // This is how the stack will look:
  // fp + 2 (==kCallerSPDisplacement) - old stack's end
  // [fp + 1 (==kCallerPCOffset)] - saved old ra
  // [fp + 0 (==kCallerFPOffset)] - saved old fp
  // [fp - 1] - frame type marker (StackFrame::EXIT, as a Smi)
  // [fp - 2 (==kSPOffset)] - sp of the called function
  // [fp - 3 (==kCodeOffset)] - CodeObject
  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
  //   new stack (will contain saved ra)

  // Save registers and reserve room for saved entry sp and code object.
  addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  li(at, Operand(StackFrame::TypeToMarker(frame_type)));
  sw(at, MemOperand(sp, 2 * kPointerSize));
  // Set up new frame pointer.
  addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);

  if (emit_debug_code()) {
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }

  // Accessed from ExitFrame::code_slot.
  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(fp, MemOperand(t8));
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(cp, MemOperand(t8));

  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
    DCHECK(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }

  // Reserve space for the return address, the requested stack space and an
  // optional slot (used by the DirectCEntryStub to hold the return value if
  // a struct is returned), and align the frame in preparation for calling
  // the runtime function.
  DCHECK(stack_space >= 0);
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  addiu(at, sp, kPointerSize);
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context, bool do_return,
                                    bool argument_count_is_length) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }

  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(zero_reg, MemOperand(t8));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    lw(cp, MemOperand(t8));
  }
#ifdef DEBUG
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(a3, MemOperand(t8));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, fp);  // Respect ABI stack constraint.
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));

  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      addu(sp, sp, argument_count);
    } else {
      Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
    }
  }

  if (do_return) {
    Ret(USE_DELAY_SLOT);
    // If returning, the instruction in the delay slot will be the addiu below.
  }
  addiu(sp, sp, 8);
}

int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one Mips
  // platform for another Mips platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_MIPS
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_MIPS
}


void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
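  // A power of two satisfies reg & (reg - 1) == 0 with reg != 0; the first
  // branch rejects zero (0 - 1 is negative) and negative values outright.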
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}


void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  DCHECK(!reg.is(overflow));
  mov(overflow, reg);  // Save original value.
  SmiTag(reg);
  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
}


void MacroAssembler::SmiTagCheckOverflow(Register dst,
                                         Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    DCHECK(!dst.is(src));
    DCHECK(!dst.is(overflow));
    DCHECK(!src.is(overflow));
    SmiTag(dst, src);
    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
  }
}


void MacroAssembler::UntagAndJumpIfSmi(Register dst,
                                       Register src,
                                       Label* smi_case) {
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
                               BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfNotSmi(Register value,
                                  Label* not_smi_label,
                                  Register scratch,
                                  BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  JumpIfNotSmi(at, on_not_both_smi);
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(1, kSmiTagMask);
  // Both Smi tags must be 1 (not Smi).
  and_(at, reg1, reg2);
  JumpIfSmi(at, on_either_smi);
}

void MacroAssembler::AssertNotNumber(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(ne, kOperandIsANumber, at, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
  }
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(eq, kOperandIsNotAGeneratorObject, t8,
          Operand(JS_GENERATOR_OBJECT_TYPE));
  }
}

void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    Branch(&done_checking, eq, object, Operand(scratch));
    lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    DCHECK(!reg.is(at));
    LoadRoot(at, index);
    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}

void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  if (src1.is(src2)) {
    Move_s(dst, src1);
    return;
  }

  // Check if one of the operands is NaN.
5782   BranchF32(nullptr, out_of_line, eq, src1, src2);
5783 
5784   if (IsMipsArchVariant(kMips32r6)) {
5785     max_s(dst, src1, src2);
5786   } else {
5787     Label return_left, return_right, done;
5788 
5789     BranchF32(&return_right, nullptr, lt, src1, src2);
5790     BranchF32(&return_left, nullptr, lt, src2, src1);
5791 
5792     // Operands are equal, but check for +/-0.
5793     mfc1(t8, src1);
5794     Branch(&return_left, eq, t8, Operand(zero_reg));
5795     Branch(&return_right);
5796 
5797     bind(&return_right);
5798     if (!src2.is(dst)) {
5799       Move_s(dst, src2);
5800     }
5801     Branch(&done);
5802 
5803     bind(&return_left);
5804     if (!src1.is(dst)) {
5805       Move_s(dst, src1);
5806     }
5807 
5808     bind(&done);
5809   }
5810 }
5811 
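// Tie-break note for the pre-r6 path above: once both ordered comparisons
// fall through, the operands compare equal, which still allows the pair
// (+0, -0). mfc1 copies the raw bit pattern of src1; +0.0f is the all-zero
// pattern while -0.0f has the sign bit set, so a zero t8 identifies src1 as
// +0, the correct result for max.
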
void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_s(dst, src1, src2);
}

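// The *OutOfLine helpers are reached only via the unordered (NaN) edge of
// BranchF32/BranchF64 above, i.e. when at least one operand is NaN. An IEEE
// add with a NaN operand produces a quiet NaN, which is exactly the required
// max/min result, so a single add_s/add_d suffices.
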
void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  if (src1.is(src2)) {
    Move_s(dst, src1);
    return;
  }

  // Check if either operand is NaN.
  BranchF32(nullptr, out_of_line, eq, src1, src2);

  if (IsMipsArchVariant(kMips32r6)) {
    min_s(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    BranchF32(&return_left, nullptr, lt, src1, src2);
    BranchF32(&return_right, nullptr, lt, src2, src1);

    // Left equals right => check for -0: min must return the -0 operand.
    mfc1(t8, src1);
    Branch(&return_right, eq, t8, Operand(zero_reg));
    Branch(&return_left);

    bind(&return_right);
    if (!src2.is(dst)) {
      Move_s(dst, src2);
    }
    Branch(&done);

    bind(&return_left);
    if (!src1.is(dst)) {
      Move_s(dst, src1);
    }

    bind(&done);
  }
}

void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_s(dst, src1, src2);
}

void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
                                DoubleRegister src2, Label* out_of_line) {
  if (src1.is(src2)) {
    Move_d(dst, src1);
    return;
  }

  // Check if either operand is NaN.
  BranchF64(nullptr, out_of_line, eq, src1, src2);

  if (IsMipsArchVariant(kMips32r6)) {
    max_d(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    BranchF64(&return_right, nullptr, lt, src1, src2);
    BranchF64(&return_left, nullptr, lt, src2, src1);

    // Left equals right => check for -0: max must return the +0 operand.
    Mfhc1(t8, src1);
    Branch(&return_left, eq, t8, Operand(zero_reg));
    Branch(&return_right);

    bind(&return_right);
    if (!src2.is(dst)) {
      Move_d(dst, src2);
    }
    Branch(&done);

    bind(&return_left);
    if (!src1.is(dst)) {
      Move_d(dst, src1);
    }

    bind(&done);
  }
}

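// For the double variants, Mfhc1 reads the upper 32 bits of the FPU
// register, which contain the sign bit. A zero high word means src1 is
// non-negative; since the operands are already known to be equal and
// ordered here, that resolves the (+0, -0) tie toward +0 for Float64Max and
// toward -0 for Float64Min.
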
void MacroAssembler::Float64MaxOutOfLine(DoubleRegister dst,
                                         DoubleRegister src1,
                                         DoubleRegister src2) {
  add_d(dst, src1, src2);
}

void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
                                DoubleRegister src2, Label* out_of_line) {
  if (src1.is(src2)) {
    Move_d(dst, src1);
    return;
  }

  // Check if either operand is NaN.
  BranchF64(nullptr, out_of_line, eq, src1, src2);

  if (IsMipsArchVariant(kMips32r6)) {
    min_d(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    BranchF64(&return_left, nullptr, lt, src1, src2);
    BranchF64(&return_right, nullptr, lt, src2, src1);

    // Left equals right => check for -0: min must return the -0 operand.
    Mfhc1(t8, src1);
    Branch(&return_right, eq, t8, Operand(zero_reg));
    Branch(&return_left);

    bind(&return_right);
    if (!src2.is(dst)) {
      Move_d(dst, src2);
    }
    Branch(&done);

    bind(&return_left);
    if (!src1.is(dst)) {
      Move_d(dst, src1);
    }

    bind(&done);
  }
}

void MacroAssembler::Float64MinOutOfLine(DoubleRegister dst,
                                         DoubleRegister src1,
                                         DoubleRegister src2) {
  add_d(dst, src1, src2);
}

void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Fits in a 16-bit immediate.
  andi(scratch1, first, kFlatOneByteStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
  andi(scratch2, second, kFlatOneByteStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}

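// Worked example, using the instance-type constants of this V8 generation
// (values shown for illustration): kIsNotStringMask 0x80, kStringEncodingMask
// 0x8 and kStringRepresentationMask 0x7 combine to a mask of 0x8f, and the
// expected tag is kStringTag | kOneByteStringTag | kSeqStringTag = 0x8. An
// instance type passes only if it is simultaneously a string, one-byte
// encoded and sequential; cons, sliced, external and two-byte strings all
// differ in at least one masked bit and branch to failure.
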
static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four simple arguments are passed in registers a0..a3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}


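// Illustrative example for the O32 calling convention assumed here: a C
// function taking (int, double, int) has num_reg_arguments = 2 and
// num_double_arguments = 1. The double counts as two register words, so all
// four words still fit in a0..a3 and no overflow words are added; only the
// always-reserved kCArgSlotCount argument slots (4 words on O32) end up in
// stack_passed_words.
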
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               Register scratch,
                                               uint32_t encoding_mask) {
  Label is_object;
  SmiTst(string, at);
  Check(ne, kNonObject, at, Operand(zero_reg));

  lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
  li(scratch, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType, at, Operand(scratch));

  // The index is assumed to come in untagged. Tag it so it can be compared
  // with the string length without using a temp register; it is untagged
  // again at the end of this function.
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, scratch, &index_tag_bad);
  Branch(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);

  lw(at, FieldMemOperand(string, String::kLengthOffset));
  Check(lt, kIndexIsTooLarge, index, Operand(at));

  DCHECK(Smi::kZero == 0);
  Check(ge, kIndexIsNegative, index, Operand(zero_reg));

  SmiUntag(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make the stack end at alignment, reserving room for the stack-passed
    // words plus one extra word for the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


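// Illustrative example: with frame_alignment = 8 and six stack-passed words,
// sp is first lowered by (6 + 1) * kPointerSize = 28 bytes, then rounded
// down to an 8-byte boundary by the And with -8. The caller's original sp,
// saved at MemOperand(sp, 6 * kPointerSize), is what CallCFunctionHelper
// reloads in a single lw after the call returns.
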
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, as required
  // by the mips ABI.

#if V8_HOST_ARCH_MIPS
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

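// Typical call sequence, sketched with an illustrative external reference
// and argument registers (not taken from this file):
//
//   PrepareCallCFunction(2, 0, t8);   // Two integer arguments, scratch t8.
//   mov(a0, first_arg_reg);
//   mov(a1, second_arg_reg);
//   CallCFunction(ExternalReference::some_runtime_helper(isolate()), 2);
//
// PrepareCallCFunction reserves the argument slots and aligns sp;
// CallCFunctionHelper restores sp afterwards, so the two must always be
// paired with matching argument counts.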

#undef BRANCH_ARGS_CHECK


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}

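// Because pages are aligned power-of-two regions, masking the object address
// with ~Page::kPageAlignmentMask yields the MemoryChunk header at the start
// of its page; the flag test is then just one load, one mask and one branch.
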

void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
  jmp(&other_color);

  bind(&word_boundary);
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  And(t9, t9, Operand(1));
  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
  bind(&other_color);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
  li(t8, Operand(1));
  sllv(mask_reg, t8, mask_reg);
}

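// Addressing sketch: for a word-aligned address, the Bitmap::kBitsPerCellLog2
// bits starting at kPointerSizeLog2 give the bit position inside a bitmap
// cell (turned into a mask by the final sllv), and the bits above them, up
// to kPageSizeBits, give the cell index. Lsa scales that index by the
// pointer size and adds the page start, leaving bitmap_reg pointing at the
// cell and mask_reg selecting the mark bit.
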

void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Register load_scratch,
                                 Label* value_is_white) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(value_is_white, eq, t8, Operand(zero_reg));
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  lw(dst,
     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  lw(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Register null_value = t1;
  Register empty_fixed_array_value = t2;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  LoadRoot(null_value, Heap::kNullValueRootIndex);
  jmp(&start);

  bind(&next);
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::kZero));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}

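// Delay-slot note: on MIPS the instruction after a branch executes whether
// or not the branch is taken. The first Branch above uses a plain nop slot;
// the second deliberately puts mov(output_reg, zero_reg) in its slot, so a
// negative input branches to done carrying 0, while an in-range input falls
// through and the next mov immediately overwrites the 0 with the input.
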

void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is <= 0 or NaN: return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is > 0: return 255 if it is >= 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}

void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                     Register scratch_reg,
                                                     Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top_adr =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoLastWordOffset =
      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
  li(at, Operand(new_space_allocation_top_adr));
  lw(at, MemOperand(at));
  Xor(scratch_reg, scratch_reg, Operand(at));
  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
  Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
  Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
  Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
  li(at, Operand(new_space_allocation_top_adr));
  lw(at, MemOperand(at));
  Branch(no_memento_found, ge, scratch_reg, Operand(at));
  // Memento map check.
  bind(&map_check);
  lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
  Branch(no_memento_found, ne, scratch_reg,
         Operand(isolate()->factory()->allocation_memento_map()));
}

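// Page-identity trick used twice above: XOR-ing two addresses and masking
// with ~Page::kPageAlignmentMask leaves zero exactly when both lie on the
// same page. That distinguishes "last memento word on the same page as
// allocation top" from "object tail crosses a page boundary" without any
// arithmetic on page numbers.
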

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}

bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}


CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to
  // patch. The size is adjusted with kGap in order for the assembler to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

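// Usage sketch (address and instruction count are illustrative, not from
// this file):
//
//   {
//     CodePatcher patcher(isolate, branch_address, 1);
//     patcher.masm()->nop();  // Overwrite one instruction with a nop.
//   }  // Destructor flushes the i-cache for the patched range.
//
// The DCHECKs in constructor and destructor verify that exactly
// `instructions` instructions were emitted into the patched region.
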

CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Instr current_instr,
                                        uint32_t new_opcode) {
  current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
  masm_.emit(current_instr);
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(mag.multiplier));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}

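// Worked example of the magic-number division above: for divisor = 3 the
// computed multiplier is 0x55555556 with shift 0 (per Hacker's Delight).
// With dividend = 7, Mulh yields the high word of 7 * 0x55555556, which is
// 2; the multiplier is non-negative and the divisor positive, so no
// correction add/sub applies, and the final srl(dividend, 31) adds the sign
// bit (0 here), giving the truncated quotient 7 / 3 = 2.
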

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS