1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <limits.h>  // For LONG_MIN, LONG_MAX.
6 
7 #if V8_TARGET_ARCH_MIPS
8 
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/debug/debug.h"
14 #include "src/mips/macro-assembler-mips.h"
15 #include "src/register-configuration.h"
16 #include "src/runtime/runtime.h"
17 
18 namespace v8 {
19 namespace internal {
20 
21 // Floating point constants.
22 const uint32_t kDoubleSignMask = HeapNumber::kSignMask;
23 const uint32_t kDoubleExponentShift = HeapNumber::kExponentShift;
24 const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
25 const uint32_t kDoubleNaNMask =
26     HeapNumber::kExponentMask | (1 << kDoubleNaNShift);
27 
28 const uint32_t kSingleSignMask = kBinary32SignMask;
29 const uint32_t kSingleExponentMask = kBinary32ExponentMask;
30 const uint32_t kSingleExponentShift = kBinary32ExponentShift;
31 const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
32 const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
33 
34 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
35                                CodeObjectRequired create_code_object)
36     : Assembler(arg_isolate, buffer, size),
37       generating_stub_(false),
38       has_frame_(false),
39       has_double_zero_reg_set_(false) {
40   if (create_code_object == CodeObjectRequired::kYes) {
41     code_object_ =
42         Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
43   }
44 }
45 
46 void MacroAssembler::Load(Register dst,
47                           const MemOperand& src,
48                           Representation r) {
49   DCHECK(!r.IsDouble());
50   if (r.IsInteger8()) {
51     lb(dst, src);
52   } else if (r.IsUInteger8()) {
53     lbu(dst, src);
54   } else if (r.IsInteger16()) {
55     lh(dst, src);
56   } else if (r.IsUInteger16()) {
57     lhu(dst, src);
58   } else {
59     lw(dst, src);
60   }
61 }
62 
63 
64 void MacroAssembler::Store(Register src,
65                            const MemOperand& dst,
66                            Representation r) {
67   DCHECK(!r.IsDouble());
68   if (r.IsInteger8() || r.IsUInteger8()) {
69     sb(src, dst);
70   } else if (r.IsInteger16() || r.IsUInteger16()) {
71     sh(src, dst);
72   } else {
73     if (r.IsHeapObject()) {
74       AssertNotSmi(src);
75     } else if (r.IsSmi()) {
76       AssertSmi(src);
77     }
78     sw(src, dst);
79   }
80 }
81 
82 void MacroAssembler::LoadRoot(Register destination,
83                               Heap::RootListIndex index) {
84   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
85 }
86 
87 
88 void MacroAssembler::LoadRoot(Register destination,
89                               Heap::RootListIndex index,
90                               Condition cond,
91                               Register src1, const Operand& src2) {
92   Branch(2, NegateCondition(cond), src1, src2);
93   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
94 }
95 
96 
97 void MacroAssembler::StoreRoot(Register source,
98                                Heap::RootListIndex index) {
99   DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
100   sw(source, MemOperand(s6, index << kPointerSizeLog2));
101 }
102 
103 
104 void MacroAssembler::StoreRoot(Register source,
105                                Heap::RootListIndex index,
106                                Condition cond,
107                                Register src1, const Operand& src2) {
108   DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
109   Branch(2, NegateCondition(cond), src1, src2);
110   sw(source, MemOperand(s6, index << kPointerSizeLog2));
111 }
112 
113 void MacroAssembler::PushCommonFrame(Register marker_reg) {
114   if (marker_reg.is_valid()) {
115     Push(ra, fp, marker_reg);
116     Addu(fp, sp, Operand(kPointerSize));
117   } else {
118     Push(ra, fp);
119     mov(fp, sp);
120   }
121 }
122 
123 void MacroAssembler::PopCommonFrame(Register marker_reg) {
124   if (marker_reg.is_valid()) {
125     Pop(ra, fp, marker_reg);
126   } else {
127     Pop(ra, fp);
128   }
129 }
130 
131 void MacroAssembler::PushStandardFrame(Register function_reg) {
132   int offset = -StandardFrameConstants::kContextOffset;
133   if (function_reg.is_valid()) {
134     Push(ra, fp, cp, function_reg);
135     offset += kPointerSize;
136   } else {
137     Push(ra, fp, cp);
138   }
139   Addu(fp, sp, Operand(offset));
140 }
141 
142 // Push and pop all registers that can hold pointers.
143 void MacroAssembler::PushSafepointRegisters() {
144   // Safepoints expect a block of kNumSafepointRegisters values on the
145   // stack, so adjust the stack for unsaved registers.
146   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
147   DCHECK(num_unsaved >= 0);
148   if (num_unsaved > 0) {
149     Subu(sp, sp, Operand(num_unsaved * kPointerSize));
150   }
151   MultiPush(kSafepointSavedRegisters);
152 }
153 
154 
155 void MacroAssembler::PopSafepointRegisters() {
156   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
157   MultiPop(kSafepointSavedRegisters);
158   if (num_unsaved > 0) {
159     Addu(sp, sp, Operand(num_unsaved * kPointerSize));
160   }
161 }
162 
163 
164 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
165   sw(src, SafepointRegisterSlot(dst));
166 }
167 
168 
169 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
170   lw(dst, SafepointRegisterSlot(src));
171 }
172 
173 
174 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
175   // The registers are pushed starting with the highest encoding,
176   // which means that lowest encodings are closest to the stack pointer.
177   return kSafepointRegisterStackIndexMap[reg_code];
178 }
179 
180 
181 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
182   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
183 }
184 
185 
186 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
187   UNIMPLEMENTED_MIPS();
188   // General purpose registers are pushed last on the stack.
189   int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
190   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
191   return MemOperand(sp, doubles_size + register_offset);
192 }
193 
194 
195 void MacroAssembler::InNewSpace(Register object,
196                                 Register scratch,
197                                 Condition cc,
198                                 Label* branch) {
199   DCHECK(cc == eq || cc == ne);
200   const int mask =
201       1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
202   CheckPageFlag(object, scratch, mask, cc, branch);
203 }
204 
205 
206 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
207 // The register 'object' contains a heap object pointer.  The heap object
208 // tag is shifted away.
209 void MacroAssembler::RecordWriteField(
210     Register object,
211     int offset,
212     Register value,
213     Register dst,
214     RAStatus ra_status,
215     SaveFPRegsMode save_fp,
216     RememberedSetAction remembered_set_action,
217     SmiCheck smi_check,
218     PointersToHereCheck pointers_to_here_check_for_value) {
219   DCHECK(!AreAliased(value, dst, t8, object));
220   // First, check if a write barrier is even needed. The tests below
221   // catch stores of Smis.
222   Label done;
223 
224   // Skip barrier if writing a smi.
225   if (smi_check == INLINE_SMI_CHECK) {
226     JumpIfSmi(value, &done);
227   }
228 
229   // Although the object register is tagged, the offset is relative to the start
230   // of the object, so the offset must be a multiple of kPointerSize.
231   DCHECK(IsAligned(offset, kPointerSize));
232 
233   Addu(dst, object, Operand(offset - kHeapObjectTag));
234   if (emit_debug_code()) {
235     Label ok;
236     And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
237     Branch(&ok, eq, t8, Operand(zero_reg));
238     stop("Unaligned cell in write barrier");
239     bind(&ok);
240   }
241 
242   RecordWrite(object,
243               dst,
244               value,
245               ra_status,
246               save_fp,
247               remembered_set_action,
248               OMIT_SMI_CHECK,
249               pointers_to_here_check_for_value);
250 
251   bind(&done);
252 
253   // Clobber clobbered input registers when running with the debug-code flag
254   // turned on to provoke errors.
255   if (emit_debug_code()) {
256     li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
257     li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
258   }
259 }
260 
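// A typical call site looks roughly like the following sketch. The registers,
// the field offset, and the reliance on the default trailing arguments from
// macro-assembler-mips.h are illustrative assumptions, not code from this
// file:
//
//   sw(a0, FieldMemOperand(a1, JSObject::kElementsOffset));
//   RecordWriteField(a1, JSObject::kElementsOffset, a0, t0,
//                    kRAHasNotBeenSaved, kDontSaveFPRegs);
//
// The store itself is emitted first; RecordWriteField then skips the barrier
// for smis and hands the real marking work to RecordWrite below.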
261 
262 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
263 void MacroAssembler::RecordWriteForMap(Register object,
264                                        Register map,
265                                        Register dst,
266                                        RAStatus ra_status,
267                                        SaveFPRegsMode fp_mode) {
268   if (emit_debug_code()) {
269     DCHECK(!dst.is(at));
270     lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
271     Check(eq,
272           kWrongAddressOrValuePassedToRecordWrite,
273           dst,
274           Operand(isolate()->factory()->meta_map()));
275   }
276 
277   if (!FLAG_incremental_marking) {
278     return;
279   }
280 
281   if (emit_debug_code()) {
282     lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
283     Check(eq,
284           kWrongAddressOrValuePassedToRecordWrite,
285           map,
286           Operand(at));
287   }
288 
289   Label done;
290 
291   // A single check of the map's pages interesting flag suffices, since it is
292   // only set during incremental collection, and then it's also guaranteed that
293   // the from object's page's interesting flag is also set.  This optimization
294   // relies on the fact that maps can never be in new space.
295   CheckPageFlag(map,
296                 map,  // Used as scratch.
297                 MemoryChunk::kPointersToHereAreInterestingMask,
298                 eq,
299                 &done);
300 
301   Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
302   if (emit_debug_code()) {
303     Label ok;
304     And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
305     Branch(&ok, eq, at, Operand(zero_reg));
306     stop("Unaligned cell in write barrier");
307     bind(&ok);
308   }
309 
310   // Record the actual write.
311   if (ra_status == kRAHasNotBeenSaved) {
312     push(ra);
313   }
314   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
315                        fp_mode);
316   CallStub(&stub);
317   if (ra_status == kRAHasNotBeenSaved) {
318     pop(ra);
319   }
320 
321   bind(&done);
322 
323   // Count number of write barriers in generated code.
324   isolate()->counters()->write_barriers_static()->Increment();
325   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
326 
327   // Clobber clobbered registers when running with the debug-code flag
328   // turned on to provoke errors.
329   if (emit_debug_code()) {
330     li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
331     li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
332   }
333 }
334 
335 
336 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
337 // The register 'object' contains a heap object pointer.  The heap object
338 // tag is shifted away.
339 void MacroAssembler::RecordWrite(
340     Register object,
341     Register address,
342     Register value,
343     RAStatus ra_status,
344     SaveFPRegsMode fp_mode,
345     RememberedSetAction remembered_set_action,
346     SmiCheck smi_check,
347     PointersToHereCheck pointers_to_here_check_for_value) {
348   DCHECK(!AreAliased(object, address, value, t8));
349   DCHECK(!AreAliased(object, address, value, t9));
350 
351   if (emit_debug_code()) {
352     lw(at, MemOperand(address));
353     Assert(
354         eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
355   }
356 
357   if (remembered_set_action == OMIT_REMEMBERED_SET &&
358       !FLAG_incremental_marking) {
359     return;
360   }
361 
362   // First, check if a write barrier is even needed. The tests below
363   // catch stores of smis and stores into the young generation.
364   Label done;
365 
366   if (smi_check == INLINE_SMI_CHECK) {
367     DCHECK_EQ(0, kSmiTag);
368     JumpIfSmi(value, &done);
369   }
370 
371   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
372     CheckPageFlag(value,
373                   value,  // Used as scratch.
374                   MemoryChunk::kPointersToHereAreInterestingMask,
375                   eq,
376                   &done);
377   }
378   CheckPageFlag(object,
379                 value,  // Used as scratch.
380                 MemoryChunk::kPointersFromHereAreInterestingMask,
381                 eq,
382                 &done);
383 
384   // Record the actual write.
385   if (ra_status == kRAHasNotBeenSaved) {
386     push(ra);
387   }
388   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
389                        fp_mode);
390   CallStub(&stub);
391   if (ra_status == kRAHasNotBeenSaved) {
392     pop(ra);
393   }
394 
395   bind(&done);
396 
397   // Count number of write barriers in generated code.
398   isolate()->counters()->write_barriers_static()->Increment();
399   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
400                    value);
401 
402   // Clobber clobbered registers when running with the debug-code flag
403   // turned on to provoke errors.
404   if (emit_debug_code()) {
405     li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
406     li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
407   }
408 }
409 
410 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
411                                                Register code_entry,
412                                                Register scratch) {
413   const int offset = JSFunction::kCodeEntryOffset;
414 
415   // Since a code entry (value) is always in old space, we don't need to update
416   // the remembered set. If incremental marking is off, there is nothing for us to
417   // do.
418   if (!FLAG_incremental_marking) return;
419 
420   DCHECK(js_function.is(a1));
421   DCHECK(code_entry.is(t0));
422   DCHECK(scratch.is(t1));
423   AssertNotSmi(js_function);
424 
425   if (emit_debug_code()) {
426     Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
427     lw(at, MemOperand(scratch));
428     Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
429            Operand(code_entry));
430   }
431 
432   // First, check if a write barrier is even needed. The tests below
433   // catch stores of Smis and stores into young gen.
434   Label done;
435 
436   CheckPageFlag(code_entry, scratch,
437                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
438   CheckPageFlag(js_function, scratch,
439                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
440 
441   const Register dst = scratch;
442   Addu(dst, js_function, Operand(offset - kHeapObjectTag));
443 
444   // Save caller-saved registers. js_function and code_entry are in the
445   // caller-saved register list.
446   DCHECK(kJSCallerSaved & js_function.bit());
447   DCHECK(kJSCallerSaved & code_entry.bit());
448   MultiPush(kJSCallerSaved | ra.bit());
449 
450   int argument_count = 3;
451 
452   PrepareCallCFunction(argument_count, 0, code_entry);
453 
454   mov(a0, js_function);
455   mov(a1, dst);
456   li(a2, Operand(ExternalReference::isolate_address(isolate())));
457 
458   {
459     AllowExternalCallThatCantCauseGC scope(this);
460     CallCFunction(
461         ExternalReference::incremental_marking_record_write_code_entry_function(
462             isolate()),
463         argument_count);
464   }
465 
466   // Restore caller-saved registers.
467   MultiPop(kJSCallerSaved | ra.bit());
468 
469   bind(&done);
470 }
471 
472 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
473                                          Register address,
474                                          Register scratch,
475                                          SaveFPRegsMode fp_mode,
476                                          RememberedSetFinalAction and_then) {
477   Label done;
478   if (emit_debug_code()) {
479     Label ok;
480     JumpIfNotInNewSpace(object, scratch, &ok);
481     stop("Remembered set pointer is in new space");
482     bind(&ok);
483   }
484   // Load store buffer top.
485   ExternalReference store_buffer =
486       ExternalReference::store_buffer_top(isolate());
487   li(t8, Operand(store_buffer));
488   lw(scratch, MemOperand(t8));
489   // Store pointer to buffer and increment buffer top.
490   sw(address, MemOperand(scratch));
491   Addu(scratch, scratch, kPointerSize);
492   // Write back new top of buffer.
493   sw(scratch, MemOperand(t8));
494   // Call stub on end of buffer.
495   // Check for end of buffer.
496   And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
497   if (and_then == kFallThroughAtEnd) {
498     Branch(&done, ne, t8, Operand(zero_reg));
499   } else {
500     DCHECK(and_then == kReturnAtEnd);
501     Ret(ne, t8, Operand(zero_reg));
502   }
503   push(ra);
504   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
505   CallStub(&store_buffer_overflow);
506   pop(ra);
507   bind(&done);
508   if (and_then == kReturnAtEnd) {
509     Ret();
510   }
511 }
512 
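// In pseudo-C, RememberedSetHelper above appends the slot address to the
// store buffer and flushes it when the buffer fills up (a sketch of the
// protocol, not emitted code):
//
//   *store_buffer_top = address;
//   store_buffer_top += kPointerSize;
//   if ((store_buffer_top & StoreBuffer::kStoreBufferMask) == 0) {
//     StoreBufferOverflowStub(...);   // buffer is full, hand off to the stub
//   }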
513 
514 // -----------------------------------------------------------------------------
515 // Allocation support.
516 
517 
518 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
519                                             Register scratch,
520                                             Label* miss) {
521   Label same_contexts;
522   Register temporary = t8;
523 
524   DCHECK(!holder_reg.is(scratch));
525   DCHECK(!holder_reg.is(at));
526   DCHECK(!scratch.is(at));
527 
528   // Load current lexical context from the active StandardFrame, which
529   // may require crawling past STUB frames.
530   Label load_context;
531   Label has_context;
532   mov(at, fp);
533   bind(&load_context);
534   lw(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
535   // Pass the temporary register; otherwise JumpIfNotSmi would modify register at.
536   JumpIfNotSmi(scratch, &has_context, temporary);
537   lw(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
538   Branch(&load_context);
539   bind(&has_context);
540 
541   // In debug mode, make sure the lexical context is set.
542 #ifdef DEBUG
543   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
544       scratch, Operand(zero_reg));
545 #endif
546 
547   // Load the native context of the current context.
548   lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
549 
550   // Check the context is a native context.
551   if (emit_debug_code()) {
552     push(holder_reg);  // Temporarily save holder on the stack.
553     // Read the first word and compare to the native_context_map.
554     lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
555     LoadRoot(at, Heap::kNativeContextMapRootIndex);
556     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
557           holder_reg, Operand(at));
558     pop(holder_reg);  // Restore holder.
559   }
560 
561   // Check if both contexts are the same.
562   lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
563   Branch(&same_contexts, eq, scratch, Operand(at));
564 
565   // Check the context is a native context.
566   if (emit_debug_code()) {
567     push(holder_reg);  // Temporarily save holder on the stack.
568     mov(holder_reg, at);  // Move at to its holding place.
569     LoadRoot(at, Heap::kNullValueRootIndex);
570     Check(ne, kJSGlobalProxyContextShouldNotBeNull,
571           holder_reg, Operand(at));
572 
573     lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
574     LoadRoot(at, Heap::kNativeContextMapRootIndex);
575     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
576           holder_reg, Operand(at));
577     // Restoring at is not needed; at is reloaded below.
578     pop(holder_reg);  // Restore holder.
579     // Restore at to holder's context.
580     lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
581   }
582 
583   // Check that the security token in the calling global object is
584   // compatible with the security token in the receiving global
585   // object.
586   int token_offset = Context::kHeaderSize +
587                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
588 
589   lw(scratch, FieldMemOperand(scratch, token_offset));
590   lw(at, FieldMemOperand(at, token_offset));
591   Branch(miss, ne, scratch, Operand(at));
592 
593   bind(&same_contexts);
594 }
595 
596 
597 // Compute the hash code from the untagged key.  This must be kept in sync with
598 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
599 // code-stubs-hydrogen.cc
600 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
601   // First of all we assign the hash seed to scratch.
602   LoadRoot(scratch, Heap::kHashSeedRootIndex);
603   SmiUntag(scratch);
604 
605   // Xor original key with a seed.
606   xor_(reg0, reg0, scratch);
607 
608   // Compute the hash code from the untagged key.  This must be kept in sync
609   // with ComputeIntegerHash in utils.h.
610   //
611   // hash = ~hash + (hash << 15);
612   nor(scratch, reg0, zero_reg);
613   Lsa(reg0, scratch, reg0, 15);
614 
615   // hash = hash ^ (hash >> 12);
616   srl(at, reg0, 12);
617   xor_(reg0, reg0, at);
618 
619   // hash = hash + (hash << 2);
620   Lsa(reg0, reg0, reg0, 2);
621 
622   // hash = hash ^ (hash >> 4);
623   srl(at, reg0, 4);
624   xor_(reg0, reg0, at);
625 
626   // hash = hash * 2057;
627   sll(scratch, reg0, 11);
628   Lsa(reg0, reg0, reg0, 3);
629   addu(reg0, reg0, scratch);
630 
631   // hash = hash ^ (hash >> 16);
632   srl(at, reg0, 16);
633   xor_(reg0, reg0, at);
634   And(reg0, reg0, Operand(0x3fffffff));
635 }
636 
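// The instruction sequence above is equivalent to this C-style sketch of the
// integer hash (reg0 is 'hash', scratch holds the untagged seed):
//
//   hash ^= seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;            // == hash + (hash << 3) + (hash << 11)
//   hash ^= hash >> 16;
//   hash &= 0x3fffffff;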
637 
638 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
639                                               Register elements,
640                                               Register key,
641                                               Register result,
642                                               Register reg0,
643                                               Register reg1,
644                                               Register reg2) {
645   // Register use:
646   //
647   // elements - holds the slow-case elements of the receiver on entry.
648   //            Unchanged unless 'result' is the same register.
649   //
650   // key      - holds the smi key on entry.
651   //            Unchanged unless 'result' is the same register.
652   //
653   //
654   // result   - holds the result on exit if the load succeeded.
655   //            Allowed to be the same as 'elements' or 'key'.
656   //            Unchanged on bailout so 'elements' or 'key' can be used
657   //            in further computation.
658   //
659   // Scratch registers:
660   //
661   // reg0 - holds the untagged key on entry and holds the hash once computed.
662   //
663   // reg1 - Used to hold the capacity mask of the dictionary.
664   //
665   // reg2 - Used for the index into the dictionary.
666   // at   - Temporary (avoid MacroAssembler instructions also using 'at').
667   Label done;
668 
669   GetNumberHash(reg0, reg1);
670 
671   // Compute the capacity mask.
672   lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
673   sra(reg1, reg1, kSmiTagSize);
674   Subu(reg1, reg1, Operand(1));
675 
676   // Generate an unrolled loop that performs a few probes before giving up.
677   for (int i = 0; i < kNumberDictionaryProbes; i++) {
678     // Use reg2 for index calculations and keep the hash intact in reg0.
679     mov(reg2, reg0);
680     // Compute the masked index: (hash + i + i * i) & mask.
681     if (i > 0) {
682       Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
683     }
684     and_(reg2, reg2, reg1);
685 
686     // Scale the index by multiplying by the element size.
687     DCHECK(SeededNumberDictionary::kEntrySize == 3);
688     Lsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.
689 
690     // Check if the key is identical to the name.
691     Lsa(reg2, elements, reg2, kPointerSizeLog2);
692 
693     lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
694     if (i != kNumberDictionaryProbes - 1) {
695       Branch(&done, eq, key, Operand(at));
696     } else {
697       Branch(miss, ne, key, Operand(at));
698     }
699   }
700 
701   bind(&done);
702   // Check that the value is a field property.
703   // reg2: elements + (index * kPointerSize).
704   const int kDetailsOffset =
705       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
706   lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
707   DCHECK_EQ(DATA, 0);
708   And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
709   Branch(miss, ne, at, Operand(zero_reg));
710 
711   // Get the value at the masked, scaled index and return.
712   const int kValueOffset =
713       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
714   lw(result, FieldMemOperand(reg2, kValueOffset));
715 }
716 
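// The unrolled loop above performs quadratic probing; as a rough C sketch
// (names such as entry_key are illustrative only):
//
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     int index = (hash + i + i * i) & capacity_mask;
//     // each entry is kEntrySize == 3 pointers: key, value, details
//     if (entry_key(elements, index) == key) break;  // last probe jumps to 'miss' instead
//   }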
717 
718 // ---------------------------------------------------------------------------
719 // Instruction macros.
720 
721 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
722   if (rt.is_reg()) {
723     addu(rd, rs, rt.rm());
724   } else {
725     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
726       addiu(rd, rs, rt.imm32_);
727     } else {
728       // li handles the relocation.
729       DCHECK(!rs.is(at));
730       li(at, rt);
731       addu(rd, rs, at);
732     }
733   }
734 }
735 
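// Addu above, and the rest of the arithmetic/logical macros below, all follow
// the same pattern: a register operand uses the native instruction, a small
// immediate uses the immediate form, and anything else is first materialized
// into 'at' with li. Illustrative expansions (assumed operands):
//
//   Addu(a0, a1, Operand(8));           // -> addiu a0, a1, 8
//   Addu(a0, a1, Operand(0x12345678));  // -> lui  at, 0x1234
//                                       //    ori  at, at, 0x5678
//                                       //    addu a0, a1, at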
736 
737 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
738   if (rt.is_reg()) {
739     subu(rd, rs, rt.rm());
740   } else {
741     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
742       addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
743     } else {
744       // li handles the relocation.
745       DCHECK(!rs.is(at));
746       li(at, rt);
747       subu(rd, rs, at);
748     }
749   }
750 }
751 
752 
753 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
754   if (rt.is_reg()) {
755     if (IsMipsArchVariant(kLoongson)) {
756       mult(rs, rt.rm());
757       mflo(rd);
758     } else {
759       mul(rd, rs, rt.rm());
760     }
761   } else {
762     // li handles the relocation.
763     DCHECK(!rs.is(at));
764     li(at, rt);
765     if (IsMipsArchVariant(kLoongson)) {
766       mult(rs, at);
767       mflo(rd);
768     } else {
769       mul(rd, rs, at);
770     }
771   }
772 }
773 
774 
775 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
776     Register rs, const Operand& rt) {
777   if (rt.is_reg()) {
778     if (!IsMipsArchVariant(kMips32r6)) {
779       mult(rs, rt.rm());
780       mflo(rd_lo);
781       mfhi(rd_hi);
782     } else {
783       if (rd_lo.is(rs)) {
784         DCHECK(!rd_hi.is(rs));
785         DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
786         muh(rd_hi, rs, rt.rm());
787         mul(rd_lo, rs, rt.rm());
788       } else {
789         DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
790         mul(rd_lo, rs, rt.rm());
791         muh(rd_hi, rs, rt.rm());
792       }
793     }
794   } else {
795     // li handles the relocation.
796     DCHECK(!rs.is(at));
797     li(at, rt);
798     if (!IsMipsArchVariant(kMips32r6)) {
799       mult(rs, at);
800       mflo(rd_lo);
801       mfhi(rd_hi);
802     } else {
803       if (rd_lo.is(rs)) {
804         DCHECK(!rd_hi.is(rs));
805         DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
806         muh(rd_hi, rs, at);
807         mul(rd_lo, rs, at);
808       } else {
809         DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
810         mul(rd_lo, rs, at);
811         muh(rd_hi, rs, at);
812       }
813     }
814   }
815 }
816 
817 void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
818                           const Operand& rt) {
819   Register reg;
820   if (rt.is_reg()) {
821     reg = rt.rm();
822   } else {
823     DCHECK(!rs.is(at));
824     reg = at;
825     li(reg, rt);
826   }
827 
828   if (!IsMipsArchVariant(kMips32r6)) {
829     multu(rs, reg);
830     mflo(rd_lo);
831     mfhi(rd_hi);
832   } else {
833     if (rd_lo.is(rs)) {
834       DCHECK(!rd_hi.is(rs));
835       DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
836       muhu(rd_hi, rs, reg);
837       mulu(rd_lo, rs, reg);
838     } else {
839       DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
840       mulu(rd_lo, rs, reg);
841       muhu(rd_hi, rs, reg);
842     }
843   }
844 }
845 
846 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
847   if (rt.is_reg()) {
848     if (!IsMipsArchVariant(kMips32r6)) {
849       mult(rs, rt.rm());
850       mfhi(rd);
851     } else {
852       muh(rd, rs, rt.rm());
853     }
854   } else {
855     // li handles the relocation.
856     DCHECK(!rs.is(at));
857     li(at, rt);
858     if (!IsMipsArchVariant(kMips32r6)) {
859       mult(rs, at);
860       mfhi(rd);
861     } else {
862       muh(rd, rs, at);
863     }
864   }
865 }
866 
867 
868 void MacroAssembler::Mult(Register rs, const Operand& rt) {
869   if (rt.is_reg()) {
870     mult(rs, rt.rm());
871   } else {
872     // li handles the relocation.
873     DCHECK(!rs.is(at));
874     li(at, rt);
875     mult(rs, at);
876   }
877 }
878 
879 
880 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
881   if (rt.is_reg()) {
882     if (!IsMipsArchVariant(kMips32r6)) {
883       multu(rs, rt.rm());
884       mfhi(rd);
885     } else {
886       muhu(rd, rs, rt.rm());
887     }
888   } else {
889     // li handles the relocation.
890     DCHECK(!rs.is(at));
891     li(at, rt);
892     if (!IsMipsArchVariant(kMips32r6)) {
893       multu(rs, at);
894       mfhi(rd);
895     } else {
896       muhu(rd, rs, at);
897     }
898   }
899 }
900 
901 
902 void MacroAssembler::Multu(Register rs, const Operand& rt) {
903   if (rt.is_reg()) {
904     multu(rs, rt.rm());
905   } else {
906     // li handles the relocation.
907     DCHECK(!rs.is(at));
908     li(at, rt);
909     multu(rs, at);
910   }
911 }
912 
913 
914 void MacroAssembler::Div(Register rs, const Operand& rt) {
915   if (rt.is_reg()) {
916     div(rs, rt.rm());
917   } else {
918     // li handles the relocation.
919     DCHECK(!rs.is(at));
920     li(at, rt);
921     div(rs, at);
922   }
923 }
924 
925 
926 void MacroAssembler::Div(Register rem, Register res,
927     Register rs, const Operand& rt) {
928   if (rt.is_reg()) {
929     if (!IsMipsArchVariant(kMips32r6)) {
930       div(rs, rt.rm());
931       mflo(res);
932       mfhi(rem);
933     } else {
934       div(res, rs, rt.rm());
935       mod(rem, rs, rt.rm());
936     }
937   } else {
938     // li handles the relocation.
939     DCHECK(!rs.is(at));
940     li(at, rt);
941     if (!IsMipsArchVariant(kMips32r6)) {
942       div(rs, at);
943       mflo(res);
944       mfhi(rem);
945     } else {
946       div(res, rs, at);
947       mod(rem, rs, at);
948     }
949   }
950 }
951 
952 
953 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
954   if (rt.is_reg()) {
955     if (!IsMipsArchVariant(kMips32r6)) {
956       div(rs, rt.rm());
957       mflo(res);
958     } else {
959       div(res, rs, rt.rm());
960     }
961   } else {
962     // li handles the relocation.
963     DCHECK(!rs.is(at));
964     li(at, rt);
965     if (!IsMipsArchVariant(kMips32r6)) {
966       div(rs, at);
967       mflo(res);
968     } else {
969       div(res, rs, at);
970     }
971   }
972 }
973 
974 
975 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
976   if (rt.is_reg()) {
977     if (!IsMipsArchVariant(kMips32r6)) {
978       div(rs, rt.rm());
979       mfhi(rd);
980     } else {
981       mod(rd, rs, rt.rm());
982     }
983   } else {
984     // li handles the relocation.
985     DCHECK(!rs.is(at));
986     li(at, rt);
987     if (!IsMipsArchVariant(kMips32r6)) {
988       div(rs, at);
989       mfhi(rd);
990     } else {
991       mod(rd, rs, at);
992     }
993   }
994 }
995 
996 
997 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
998   if (rt.is_reg()) {
999     if (!IsMipsArchVariant(kMips32r6)) {
1000       divu(rs, rt.rm());
1001       mfhi(rd);
1002     } else {
1003       modu(rd, rs, rt.rm());
1004     }
1005   } else {
1006     // li handles the relocation.
1007     DCHECK(!rs.is(at));
1008     li(at, rt);
1009     if (!IsMipsArchVariant(kMips32r6)) {
1010       divu(rs, at);
1011       mfhi(rd);
1012     } else {
1013       modu(rd, rs, at);
1014     }
1015   }
1016 }
1017 
1018 
1019 void MacroAssembler::Divu(Register rs, const Operand& rt) {
1020   if (rt.is_reg()) {
1021     divu(rs, rt.rm());
1022   } else {
1023     // li handles the relocation.
1024     DCHECK(!rs.is(at));
1025     li(at, rt);
1026     divu(rs, at);
1027   }
1028 }
1029 
1030 
1031 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
1032   if (rt.is_reg()) {
1033     if (!IsMipsArchVariant(kMips32r6)) {
1034       divu(rs, rt.rm());
1035       mflo(res);
1036     } else {
1037       divu(res, rs, rt.rm());
1038     }
1039   } else {
1040     // li handles the relocation.
1041     DCHECK(!rs.is(at));
1042     li(at, rt);
1043     if (!IsMipsArchVariant(kMips32r6)) {
1044       divu(rs, at);
1045       mflo(res);
1046     } else {
1047       divu(res, rs, at);
1048     }
1049   }
1050 }
1051 
1052 
1053 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1054   if (rt.is_reg()) {
1055     and_(rd, rs, rt.rm());
1056   } else {
1057     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1058       andi(rd, rs, rt.imm32_);
1059     } else {
1060       // li handles the relocation.
1061       DCHECK(!rs.is(at));
1062       li(at, rt);
1063       and_(rd, rs, at);
1064     }
1065   }
1066 }
1067 
1068 
1069 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1070   if (rt.is_reg()) {
1071     or_(rd, rs, rt.rm());
1072   } else {
1073     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1074       ori(rd, rs, rt.imm32_);
1075     } else {
1076       // li handles the relocation.
1077       DCHECK(!rs.is(at));
1078       li(at, rt);
1079       or_(rd, rs, at);
1080     }
1081   }
1082 }
1083 
1084 
1085 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1086   if (rt.is_reg()) {
1087     xor_(rd, rs, rt.rm());
1088   } else {
1089     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1090       xori(rd, rs, rt.imm32_);
1091     } else {
1092       // li handles the relocation.
1093       DCHECK(!rs.is(at));
1094       li(at, rt);
1095       xor_(rd, rs, at);
1096     }
1097   }
1098 }
1099 
1100 
1101 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1102   if (rt.is_reg()) {
1103     nor(rd, rs, rt.rm());
1104   } else {
1105     // li handles the relocation.
1106     DCHECK(!rs.is(at));
1107     li(at, rt);
1108     nor(rd, rs, at);
1109   }
1110 }
1111 
1112 
1113 void MacroAssembler::Neg(Register rs, const Operand& rt) {
1114   DCHECK(rt.is_reg());
1115   DCHECK(!at.is(rs));
1116   DCHECK(!at.is(rt.rm()));
1117   li(at, -1);
1118   xor_(rs, rt.rm(), at);
1119 }
1120 
1121 
1122 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1123   if (rt.is_reg()) {
1124     slt(rd, rs, rt.rm());
1125   } else {
1126     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1127       slti(rd, rs, rt.imm32_);
1128     } else {
1129       // li handles the relocation.
1130       DCHECK(!rs.is(at));
1131       li(at, rt);
1132       slt(rd, rs, at);
1133     }
1134   }
1135 }
1136 
1137 
1138 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1139   if (rt.is_reg()) {
1140     sltu(rd, rs, rt.rm());
1141   } else {
1142     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1143       sltiu(rd, rs, rt.imm32_);
1144     } else {
1145       // li handles the relocation.
1146       DCHECK(!rs.is(at));
1147       li(at, rt);
1148       sltu(rd, rs, at);
1149     }
1150   }
1151 }
1152 
1153 
1154 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1155   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1156     if (rt.is_reg()) {
1157       rotrv(rd, rs, rt.rm());
1158     } else {
1159       rotr(rd, rs, rt.imm32_ & 0x1f);
1160     }
1161   } else {
1162     if (rt.is_reg()) {
1163       subu(at, zero_reg, rt.rm());
1164       sllv(at, rs, at);
1165       srlv(rd, rs, rt.rm());
1166       or_(rd, rd, at);
1167     } else {
1168       if (rt.imm32_ == 0) {
1169         srl(rd, rs, 0);
1170       } else {
1171         srl(at, rs, rt.imm32_ & 0x1f);
1172         sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
1173         or_(rd, rd, at);
1174       }
1175     }
1176   }
1177 }
1178 
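// Without rotr/rotrv (pre-r2 variants), Ror falls back to the usual
// shift-and-or identity; for an immediate n in 1..31 the result is
//
//   rd = (rs >> n) | (rs << (32 - n));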
1179 
1180 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1181   if (IsMipsArchVariant(kLoongson)) {
1182     lw(zero_reg, rs);
1183   } else {
1184     pref(hint, rs);
1185   }
1186 }
1187 
1188 
1189 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1190                          Register scratch) {
1191   DCHECK(sa >= 1 && sa <= 31);
1192   if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
1193     lsa(rd, rt, rs, sa - 1);
1194   } else {
1195     Register tmp = rd.is(rt) ? scratch : rd;
1196     DCHECK(!tmp.is(rt));
1197     sll(tmp, rs, sa);
1198     Addu(rd, rt, tmp);
1199   }
1200 }
1201 
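// Lsa ("load scaled address") computes rd = rt + (rs << sa). On r6 with a
// small scale it maps to the single lsa instruction; otherwise it expands to
// a sll/Addu pair, which is why a scratch register may be needed when rd
// aliases rt.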
1202 
1203 // ------------Pseudo-instructions-------------
1204 
1205 // Word Swap Byte
1206 void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) {
1207   DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
1208   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1209     if (operand_size == 2) {
1210       seh(reg, reg);
1211     } else if (operand_size == 1) {
1212       seb(reg, reg);
1213     }
1214     // No need to do any preparation if operand_size is 4
1215 
1216     wsbh(reg, reg);
1217     rotr(reg, reg, 16);
1218   } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
1219     if (operand_size == 1) {
1220       sll(reg, reg, 24);
1221       sra(reg, reg, 24);
1222     } else if (operand_size == 2) {
1223       sll(reg, reg, 16);
1224       sra(reg, reg, 16);
1225     }
1226     // No need to do any preparation if operand_size is 4
1227 
1228     Register tmp = t0;
1229     Register tmp2 = t1;
1230 
1231     andi(tmp2, reg, 0xFF);
1232     sll(tmp2, tmp2, 24);
1233     or_(tmp, zero_reg, tmp2);
1234 
1235     andi(tmp2, reg, 0xFF00);
1236     sll(tmp2, tmp2, 8);
1237     or_(tmp, tmp, tmp2);
1238 
1239     srl(reg, reg, 8);
1240     andi(tmp2, reg, 0xFF00);
1241     or_(tmp, tmp, tmp2);
1242 
1243     srl(reg, reg, 16);
1244     andi(tmp2, reg, 0xFF);
1245     or_(tmp, tmp, tmp2);
1246 
1247     or_(reg, tmp, zero_reg);
1248   }
1249 }
1250 
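// Worked example for the r2/r6 path above, with reg = 0x11223344:
//
//   wsbh reg, reg    // swap bytes within each halfword -> 0x22114433
//   rotr reg, 16     // swap the two halfwords          -> 0x44332211
//
// i.e. a full 32-bit byte reversal.
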
1251 void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) {
1252   DCHECK(operand_size == 1 || operand_size == 2);
1253 
1254   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1255     if (operand_size == 1) {
1256       andi(reg, reg, 0xFF);
1257     } else {
1258       andi(reg, reg, 0xFFFF);
1259     }
1260     // No need to do any preparation if operand_size is 4
1261 
1262     wsbh(reg, reg);
1263     rotr(reg, reg, 16);
1264   } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
1265     if (operand_size == 1) {
1266       sll(reg, reg, 24);
1267     } else {
1268       Register tmp = t0;
1269 
1270       andi(tmp, reg, 0xFF00);
1271       sll(reg, reg, 24);
1272       sll(tmp, tmp, 8);
1273       or_(reg, tmp, reg);
1274     }
1275   }
1276 }
1277 
1278 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1279   DCHECK(!rd.is(at));
1280   DCHECK(!rs.rm().is(at));
1281   if (IsMipsArchVariant(kMips32r6)) {
1282     lw(rd, rs);
1283   } else {
1284     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1285            IsMipsArchVariant(kLoongson));
1286     if (is_int16(rs.offset() + kMipsLwrOffset) &&
1287         is_int16(rs.offset() + kMipsLwlOffset)) {
1288       if (!rd.is(rs.rm())) {
1289         lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1290         lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1291       } else {
1292         lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1293         lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1294         mov(rd, at);
1295       }
1296     } else {  // Offset > 16 bits, use multiple instructions to load.
1297       LoadRegPlusOffsetToAt(rs);
1298       lwr(rd, MemOperand(at, kMipsLwrOffset));
1299       lwl(rd, MemOperand(at, kMipsLwlOffset));
1300     }
1301   }
1302 }
1303 
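// On pre-r6 cores an unaligned word is assembled from the lwr/lwl pair: each
// instruction loads the bytes on its side of the alignment boundary, so the
// two together fill the whole destination register. On r6 those instructions
// no longer exist and the plain lw at the top is expected to handle the
// unaligned case itself.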
1304 
1305 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1306   DCHECK(!rd.is(at));
1307   DCHECK(!rs.rm().is(at));
1308   if (IsMipsArchVariant(kMips32r6)) {
1309     sw(rd, rs);
1310   } else {
1311     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1312            IsMipsArchVariant(kLoongson));
1313     if (is_int16(rs.offset() + kMipsSwrOffset) &&
1314         is_int16(rs.offset() + kMipsSwlOffset)) {
1315       swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
1316       swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
1317     } else {
1318       LoadRegPlusOffsetToAt(rs);
1319       swr(rd, MemOperand(at, kMipsSwrOffset));
1320       swl(rd, MemOperand(at, kMipsSwlOffset));
1321     }
1322   }
1323 }
1324 
1325 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1326   DCHECK(!rd.is(at));
1327   DCHECK(!rs.rm().is(at));
1328   if (IsMipsArchVariant(kMips32r6)) {
1329     lh(rd, rs);
1330   } else {
1331     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1332            IsMipsArchVariant(kLoongson));
1333     if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1334 #if defined(V8_TARGET_LITTLE_ENDIAN)
1335       lbu(at, rs);
1336       lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
1337 #elif defined(V8_TARGET_BIG_ENDIAN)
1338       lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1339       lb(rd, rs);
1340 #endif
1341     } else {  // Offset > 16 bits, use multiple instructions to load.
1342       LoadRegPlusOffsetToAt(rs);
1343 #if defined(V8_TARGET_LITTLE_ENDIAN)
1344       lb(rd, MemOperand(at, 1));
1345       lbu(at, MemOperand(at, 0));
1346 #elif defined(V8_TARGET_BIG_ENDIAN)
1347       lb(rd, MemOperand(at, 0));
1348       lbu(at, MemOperand(at, 1));
1349 #endif
1350     }
1351     sll(rd, rd, 8);
1352     or_(rd, rd, at);
1353   }
1354 }
1355 
1356 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1357   DCHECK(!rd.is(at));
1358   DCHECK(!rs.rm().is(at));
1359   if (IsMipsArchVariant(kMips32r6)) {
1360     lhu(rd, rs);
1361   } else {
1362     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1363            IsMipsArchVariant(kLoongson));
1364     if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1365 #if defined(V8_TARGET_LITTLE_ENDIAN)
1366       lbu(at, rs);
1367       lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
1368 #elif defined(V8_TARGET_BIG_ENDIAN)
1369       lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1370       lbu(rd, rs);
1371 #endif
1372     } else {  // Offset > 16 bits, use multiple instructions to load.
1373       LoadRegPlusOffsetToAt(rs);
1374 #if defined(V8_TARGET_LITTLE_ENDIAN)
1375       lbu(rd, MemOperand(at, 1));
1376       lbu(at, MemOperand(at, 0));
1377 #elif defined(V8_TARGET_BIG_ENDIAN)
1378       lbu(rd, MemOperand(at, 0));
1379       lbu(at, MemOperand(at, 1));
1380 #endif
1381     }
1382     sll(rd, rd, 8);
1383     or_(rd, rd, at);
1384   }
1385 }
1386 
1387 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1388   DCHECK(!rd.is(at));
1389   DCHECK(!rs.rm().is(at));
1390   DCHECK(!rs.rm().is(scratch));
1391   DCHECK(!scratch.is(at));
1392   if (IsMipsArchVariant(kMips32r6)) {
1393     sh(rd, rs);
1394   } else {
1395     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1396            IsMipsArchVariant(kLoongson));
1397     MemOperand source = rs;
1398     // If offset > 16 bits, load address to at with offset 0.
1399     if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
1400       LoadRegPlusOffsetToAt(rs);
1401       source = MemOperand(at, 0);
1402     }
1403 
1404     if (!scratch.is(rd)) {
1405       mov(scratch, rd);
1406     }
1407 
1408 #if defined(V8_TARGET_LITTLE_ENDIAN)
1409     sb(scratch, source);
1410     srl(scratch, scratch, 8);
1411     sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1412 #elif defined(V8_TARGET_BIG_ENDIAN)
1413     sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1414     srl(scratch, scratch, 8);
1415     sb(scratch, source);
1416 #endif
1417   }
1418 }
1419 
1420 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1421                            Register scratch) {
1422   if (IsMipsArchVariant(kMips32r6)) {
1423     lwc1(fd, rs);
1424   } else {
1425     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1426            IsMipsArchVariant(kLoongson));
1427     Ulw(scratch, rs);
1428     mtc1(scratch, fd);
1429   }
1430 }
1431 
1432 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1433                            Register scratch) {
1434   if (IsMipsArchVariant(kMips32r6)) {
1435     swc1(fd, rs);
1436   } else {
1437     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1438            IsMipsArchVariant(kLoongson));
1439     mfc1(scratch, fd);
1440     Usw(scratch, rs);
1441   }
1442 }
1443 
1444 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1445                            Register scratch) {
1446   DCHECK(!scratch.is(at));
1447   if (IsMipsArchVariant(kMips32r6)) {
1448     ldc1(fd, rs);
1449   } else {
1450     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1451            IsMipsArchVariant(kLoongson));
1452     Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
1453     mtc1(scratch, fd);
1454     Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
1455     Mthc1(scratch, fd);
1456   }
1457 }
1458 
1459 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1460                            Register scratch) {
1461   DCHECK(!scratch.is(at));
1462   if (IsMipsArchVariant(kMips32r6)) {
1463     sdc1(fd, rs);
1464   } else {
1465     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
1466            IsMipsArchVariant(kLoongson));
1467     mfc1(scratch, fd);
1468     Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
1469     Mfhc1(scratch, fd);
1470     Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
1471   }
1472 }
1473 
1474 
1475 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1476   AllowDeferredHandleDereference smi_check;
1477   if (value->IsSmi()) {
1478     li(dst, Operand(value), mode);
1479   } else {
1480     DCHECK(value->IsHeapObject());
1481     if (isolate()->heap()->InNewSpace(*value)) {
1482       Handle<Cell> cell = isolate()->factory()->NewCell(value);
1483       li(dst, Operand(cell));
1484       lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
1485     } else {
1486       li(dst, Operand(value));
1487     }
1488   }
1489 }
1490 
1491 
1492 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1493   DCHECK(!j.is_reg());
1494   BlockTrampolinePoolScope block_trampoline_pool(this);
1495   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1496     // Normal load of an immediate value which does not need Relocation Info.
1497     if (is_int16(j.imm32_)) {
1498       addiu(rd, zero_reg, j.imm32_);
1499     } else if (!(j.imm32_ & kHiMask)) {
1500       ori(rd, zero_reg, j.imm32_);
1501     } else if (!(j.imm32_ & kImm16Mask)) {
1502       lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1503     } else {
1504       lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1505       ori(rd, rd, (j.imm32_ & kImm16Mask));
1506     }
1507   } else {
1508     if (MustUseReg(j.rmode_)) {
1509       RecordRelocInfo(j.rmode_, j.imm32_);
1510     }
1511     // We always need the same number of instructions as we may need to patch
1512     // this code to load another value which may need 2 instructions to load.
1513     lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1514     ori(rd, rd, (j.imm32_ & kImm16Mask));
1515   }
1516 }
1517 
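// Illustrative expansions of the OPTIMIZE_SIZE path above (the immediates are
// assumed values chosen to hit each case):
//
//   li(a0, Operand(-4));          // addiu a0, zero_reg, -4      (fits int16)
//   li(a0, Operand(0x0000ffff));  // ori   a0, zero_reg, 0xffff  (upper half zero)
//   li(a0, Operand(0xabcd0000));  // lui   a0, 0xabcd            (lower half zero)
//   li(a0, Operand(0xabcd1234));  // lui   a0, 0xabcd; ori a0, a0, 0x1234
//
// When relocation info is required, the two-instruction lui/ori form is
// always emitted so the constant can be patched later.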
1518 
1519 void MacroAssembler::MultiPush(RegList regs) {
1520   int16_t num_to_push = NumberOfBitsSet(regs);
1521   int16_t stack_offset = num_to_push * kPointerSize;
1522 
1523   Subu(sp, sp, Operand(stack_offset));
1524   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1525     if ((regs & (1 << i)) != 0) {
1526       stack_offset -= kPointerSize;
1527       sw(ToRegister(i), MemOperand(sp, stack_offset));
1528     }
1529   }
1530 }
1531 
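// MultiPush places the highest-numbered register closest to the old stack
// pointer and the lowest-numbered one at the new sp, consistent with the
// comment in SafepointRegisterStackIndex above. For example (illustrative):
//
//   MultiPush(a0.bit() | a1.bit() | ra.bit());
//   // memory: sp+0 = a0, sp+4 = a1, sp+8 = ra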
1532 
1533 void MacroAssembler::MultiPushReversed(RegList regs) {
1534   int16_t num_to_push = NumberOfBitsSet(regs);
1535   int16_t stack_offset = num_to_push * kPointerSize;
1536 
1537   Subu(sp, sp, Operand(stack_offset));
1538   for (int16_t i = 0; i < kNumRegisters; i++) {
1539     if ((regs & (1 << i)) != 0) {
1540       stack_offset -= kPointerSize;
1541       sw(ToRegister(i), MemOperand(sp, stack_offset));
1542     }
1543   }
1544 }
1545 
1546 
1547 void MacroAssembler::MultiPop(RegList regs) {
1548   int16_t stack_offset = 0;
1549 
1550   for (int16_t i = 0; i < kNumRegisters; i++) {
1551     if ((regs & (1 << i)) != 0) {
1552       lw(ToRegister(i), MemOperand(sp, stack_offset));
1553       stack_offset += kPointerSize;
1554     }
1555   }
1556   addiu(sp, sp, stack_offset);
1557 }
1558 
1559 
1560 void MacroAssembler::MultiPopReversed(RegList regs) {
1561   int16_t stack_offset = 0;
1562 
1563   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1564     if ((regs & (1 << i)) != 0) {
1565       lw(ToRegister(i), MemOperand(sp, stack_offset));
1566       stack_offset += kPointerSize;
1567     }
1568   }
1569   addiu(sp, sp, stack_offset);
1570 }
1571 
1572 
1573 void MacroAssembler::MultiPushFPU(RegList regs) {
1574   int16_t num_to_push = NumberOfBitsSet(regs);
1575   int16_t stack_offset = num_to_push * kDoubleSize;
1576 
1577   Subu(sp, sp, Operand(stack_offset));
1578   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1579     if ((regs & (1 << i)) != 0) {
1580       stack_offset -= kDoubleSize;
1581       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1582     }
1583   }
1584 }
1585 
1586 
1587 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1588   int16_t num_to_push = NumberOfBitsSet(regs);
1589   int16_t stack_offset = num_to_push * kDoubleSize;
1590 
1591   Subu(sp, sp, Operand(stack_offset));
1592   for (int16_t i = 0; i < kNumRegisters; i++) {
1593     if ((regs & (1 << i)) != 0) {
1594       stack_offset -= kDoubleSize;
1595       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1596     }
1597   }
1598 }
1599 
1600 
1601 void MacroAssembler::MultiPopFPU(RegList regs) {
1602   int16_t stack_offset = 0;
1603 
1604   for (int16_t i = 0; i < kNumRegisters; i++) {
1605     if ((regs & (1 << i)) != 0) {
1606       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1607       stack_offset += kDoubleSize;
1608     }
1609   }
1610   addiu(sp, sp, stack_offset);
1611 }
1612 
1613 
1614 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1615   int16_t stack_offset = 0;
1616 
1617   for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1618     if ((regs & (1 << i)) != 0) {
1619       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1620       stack_offset += kDoubleSize;
1621     }
1622   }
1623   addiu(sp, sp, stack_offset);
1624 }
1625 
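// AddPair adds two 64-bit values held in register pairs. The carry out of
// the low word is detected with an unsigned compare: after the 32-bit add
// wraps, dst_low is smaller than either operand exactly when a carry
// occurred, and the high word is then incremented. Illustrative example:
// 0xFFFFFFFF'00000001 + 0x00000000'FFFFFFFF -> the low word wraps to 0, the
// carry bumps the high word, and the 64-bit result is 0.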
1626 void MacroAssembler::AddPair(Register dst_low, Register dst_high,
1627                              Register left_low, Register left_high,
1628                              Register right_low, Register right_high) {
1629   Label no_overflow;
1630   Register kScratchReg = s3;
1631   Register kScratchReg2 = s4;
1632   // Add lower word
1633   Addu(dst_low, left_low, right_low);
1634   Addu(dst_high, left_high, right_high);
1635   // Check for lower word unsigned overflow
1636   Sltu(kScratchReg, dst_low, left_low);
1637   Sltu(kScratchReg2, dst_low, right_low);
1638   Or(kScratchReg, kScratchReg2, kScratchReg);
1639   Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
1640   // Increment higher word if there was overflow
1641   Addu(dst_high, dst_high, 0x1);
1642   bind(&no_overflow);
1643 }
1644 
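// SubPair subtracts two 64-bit values held in register pairs. A borrow out
// of the low word happens exactly when left_low < right_low (unsigned),
// which is what the Sltu below computes; in that case the high word of the
// result is decremented by one.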
1645 void MacroAssembler::SubPair(Register dst_low, Register dst_high,
1646                              Register left_low, Register left_high,
1647                              Register right_low, Register right_high) {
1648   Label no_overflow;
1649   Register kScratchReg = s3;
1650   // Subtract lower word
1651   Subu(dst_low, left_low, right_low);
1652   Subu(dst_high, left_high, right_high);
1653   // Check for lower word unsigned underflow
1654   Sltu(kScratchReg, left_low, right_low);
1655   Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
1656   // Decrement higher word if there was underflow
1657   Subu(dst_high, dst_high, 0x1);
1658   bind(&no_overflow);
1659 }
1660 
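// ShlPair shifts a 64-bit value (src_high:src_low) left by a variable amount
// in the range 0..63. For 0 < shift < 32 the result is
//   dst_high = (src_high << shift) | (src_low >> (32 - shift)),
//   dst_low  = src_low << shift;
// for shift == 32 the low word moves into the high word, and for shift > 32
// only src_low << (shift - 32) survives in the high word. The explicit
// zero-shift case is needed because MIPS variable shifts use only the low
// five bits of the shift amount, so "32 - 0 = 32" would act as a shift by
// zero and corrupt dst_high.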
1661 void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
1662                              Register src_low, Register src_high,
1663                              Register shift) {
1664   Label less_than_32;
1665   Label zero_shift;
1666   Label word_shift;
1667   Label done;
1668   Register kScratchReg = s3;
1669   And(shift, shift, 0x3F);
1670   li(kScratchReg, 0x20);
1671   Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1672 
1673   Branch(&word_shift, eq, shift, Operand(kScratchReg));
1674   // Shift more than 32
1675   Subu(kScratchReg, shift, kScratchReg);
1676   mov(dst_low, zero_reg);
1677   sllv(dst_high, src_low, kScratchReg);
1678   Branch(&done);
1679   // Word shift
1680   bind(&word_shift);
1681   mov(dst_low, zero_reg);
1682   mov(dst_high, src_low);
1683   Branch(&done);
1684 
1685   bind(&less_than_32);
1686   // Check if zero shift
1687   Branch(&zero_shift, eq, shift, Operand(zero_reg));
1688   // Shift less than 32
1689   Subu(kScratchReg, kScratchReg, shift);
1690   sllv(dst_high, src_high, shift);
1691   sllv(dst_low, src_low, shift);
1692   srlv(kScratchReg, src_low, kScratchReg);
1693   Or(dst_high, dst_high, kScratchReg);
1694   Branch(&done);
1695   // Zero shift
1696   bind(&zero_shift);
1697   mov(dst_low, src_low);
1698   mov(dst_high, src_high);
1699   bind(&done);
1700 }
1701 
1702 void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
1703                              Register src_low, Register src_high,
1704                              uint32_t shift) {
1705   Register kScratchReg = s3;
1706   shift = shift & 0x3F;
1707   if (shift < 32) {
1708     if (shift == 0) {
1709       mov(dst_low, src_low);
1710       mov(dst_high, src_high);
1711     } else {
1712       sll(dst_high, src_high, shift);
1713       sll(dst_low, src_low, shift);
1714       shift = 32 - shift;
1715       srl(kScratchReg, src_low, shift);
1716       Or(dst_high, dst_high, kScratchReg);
1717     }
1718   } else {
1719     if (shift == 32) {
1720       mov(dst_low, zero_reg);
1721       mov(dst_high, src_low);
1722     } else {
1723       shift = shift - 32;
1724       mov(dst_low, zero_reg);
1725       sll(dst_high, src_low, shift);
1726     }
1727   }
1728 }
1729 
1730 void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
1731                              Register src_low, Register src_high,
1732                              Register shift) {
1733   Label less_than_32;
1734   Label zero_shift;
1735   Label word_shift;
1736   Label done;
1737   Register kScratchReg = s3;
1738   And(shift, shift, 0x3F);
1739   li(kScratchReg, 0x20);
1740   Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1741 
1742   Branch(&word_shift, eq, shift, Operand(kScratchReg));
1743   // Shift more than 32
1744   Subu(kScratchReg, shift, kScratchReg);
1745   mov(dst_high, zero_reg);
1746   srlv(dst_low, src_high, kScratchReg);
1747   Branch(&done);
1748   // Word shift
1749   bind(&word_shift);
1750   mov(dst_high, zero_reg);
1751   mov(dst_low, src_high);
1752   Branch(&done);
1753 
1754   bind(&less_than_32);
1755   // Check if zero shift
1756   Branch(&zero_shift, eq, shift, Operand(zero_reg));
1757   // Shift less than 32
1758   Subu(kScratchReg, kScratchReg, shift);
1759   srlv(dst_high, src_high, shift);
1760   srlv(dst_low, src_low, shift);
1761   sllv(kScratchReg, src_high, kScratchReg);
1762   Or(dst_low, dst_low, kScratchReg);
1763   Branch(&done);
1764   // Zero shift
1765   bind(&zero_shift);
1766   mov(dst_low, src_low);
1767   mov(dst_high, src_high);
1768   bind(&done);
1769 }
1770 
1771 void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
1772                              Register src_low, Register src_high,
1773                              uint32_t shift) {
1774   Register kScratchReg = s3;
1775   shift = shift & 0x3F;
1776   if (shift < 32) {
1777     if (shift == 0) {
1778       mov(dst_low, src_low);
1779       mov(dst_high, src_high);
1780     } else {
1781       srl(dst_high, src_high, shift);
1782       srl(dst_low, src_low, shift);
1783       shift = 32 - shift;
1784       sll(kScratchReg, src_high, shift);
1785       Or(dst_low, dst_low, kScratchReg);
1786     }
1787   } else {
1788     if (shift == 32) {
1789       mov(dst_high, zero_reg);
1790       mov(dst_low, src_high);
1791     } else {
1792       shift = shift - 32;
1793       mov(dst_high, zero_reg);
1794       srl(dst_low, src_high, shift);
1795     }
1796   }
1797 }
1798 
1799 void MacroAssembler::SarPair(Register dst_low, Register dst_high,
1800                              Register src_low, Register src_high,
1801                              Register shift) {
1802   Label less_than_32;
1803   Label zero_shift;
1804   Label word_shift;
1805   Label done;
1806   Register kScratchReg = s3;
1807   Register kScratchReg2 = s4;
1808   And(shift, shift, 0x3F);
1809   li(kScratchReg, 0x20);
1810   Branch(&less_than_32, lt, shift, Operand(kScratchReg));
1811 
1812   Branch(&word_shift, eq, shift, Operand(kScratchReg));
1813 
1814   // Shift more than 32
1815   li(kScratchReg2, 0x1F);
1816   Subu(kScratchReg, shift, kScratchReg);
1817   srav(dst_high, src_high, kScratchReg2);
1818   srav(dst_low, src_high, kScratchReg);
1819   Branch(&done);
1820   // Word shift
1821   bind(&word_shift);
1822   li(kScratchReg2, 0x1F);
1823   srav(dst_high, src_high, kScratchReg2);
1824   mov(dst_low, src_high);
1825   Branch(&done);
1826 
1827   bind(&less_than_32);
1828   // Check if zero shift
1829   Branch(&zero_shift, eq, shift, Operand(zero_reg));
1830 
1831   // Shift less than 32
1832   Subu(kScratchReg, kScratchReg, shift);
1833   srav(dst_high, src_high, shift);
1834   srlv(dst_low, src_low, shift);
1835   sllv(kScratchReg, src_high, kScratchReg);
1836   Or(dst_low, dst_low, kScratchReg);
1837   Branch(&done);
1838   // Zero shift
1839   bind(&zero_shift);
1840   mov(dst_low, src_low);
1841   mov(dst_high, src_high);
1842   bind(&done);
1843 }
1844 
1845 void MacroAssembler::SarPair(Register dst_low, Register dst_high,
1846                              Register src_low, Register src_high,
1847                              uint32_t shift) {
1848   Register kScratchReg = s3;
1849   shift = shift & 0x3F;
1850   if (shift < 32) {
1851     if (shift == 0) {
1852       mov(dst_low, src_low);
1853       mov(dst_high, src_high);
1854     } else {
1855       sra(dst_high, src_high, shift);
1856       srl(dst_low, src_low, shift);
1857       shift = 32 - shift;
1858       sll(kScratchReg, src_high, shift);
1859       Or(dst_low, dst_low, kScratchReg);
1860     }
1861   } else {
1862     if (shift == 32) {
1863       sra(dst_high, src_high, 31);
1864       mov(dst_low, src_high);
1865     } else {
1866       shift = shift - 32;
1867       sra(dst_high, src_high, 31);
1868       sra(dst_low, src_high, shift);
1869     }
1870   }
1871 }
1872 
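// Ext extracts the bit field rs[pos + size - 1 .. pos] into rt, zero
// extended. On r2/r6 this is a single ext instruction; on older cores it is
// emulated with a left shift that discards the bits above the field,
// followed by a logical right shift that drops the bits below it. For
// example (illustrative), Ext(rt, rs, 4, 8) becomes sll rt, rs, 20 followed
// by srl rt, rt, 24, leaving bits [11:4] of rs in the low byte of rt.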
1873 void MacroAssembler::Ext(Register rt,
1874                          Register rs,
1875                          uint16_t pos,
1876                          uint16_t size) {
1877   DCHECK(pos < 32);
1878   DCHECK(pos + size < 33);
1879 
1880   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1881     ext_(rt, rs, pos, size);
1882   } else {
1883     // Move rs to rt and shift it left then right to get the
1884     // desired bitfield on the right side and zeroes on the left.
1885     int shift_left = 32 - (pos + size);
1886     sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
1887 
1888     int shift_right = 32 - size;
1889     if (shift_right > 0) {
1890       srl(rt, rt, shift_right);
1891     }
1892   }
1893 }
1894 
1895 
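// Ins inserts the low 'size' bits of rs into rt at bit position 'pos',
// leaving the other bits of rt unchanged. The pre-r2 fallback builds the
// field mask (1 << size) - 1 in 'at', positions the source bits in t8,
// clears the destination field in rt with the inverted mask, and ORs the
// two halves together.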
1896 void MacroAssembler::Ins(Register rt,
1897                          Register rs,
1898                          uint16_t pos,
1899                          uint16_t size) {
1900   DCHECK(pos < 32);
1901   DCHECK(pos + size <= 32);
1902   DCHECK(size != 0);
1903 
1904   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1905     ins_(rt, rs, pos, size);
1906   } else {
1907     DCHECK(!rt.is(t8) && !rs.is(t8));
1908     Subu(at, zero_reg, Operand(1));
1909     srl(at, at, 32 - size);
1910     and_(t8, rs, at);
1911     sll(t8, t8, pos);
1912     sll(at, at, pos);
1913     nor(at, at, zero_reg);
1914     and_(at, rt, at);
1915     or_(rt, t8, at);
1916   }
1917 }
1918 
1919 
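// Cvt_d_uw converts an unsigned 32-bit integer to a double. Without a
// 64-bit conversion path the value is first converted as a signed int32;
// if its most significant bit was set, the (negative) intermediate result
// is corrected by adding 2^32, whose IEEE-754 double representation has
// the high word 0x41F00000 loaded below.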
1920 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
1921                               FPURegister scratch) {
1922   // In FP64Mode we do the conversion from long.
1923   if (IsFp64Mode()) {
1924     mtc1(rs, scratch);
1925     Mthc1(zero_reg, scratch);
1926     cvt_d_l(fd, scratch);
1927   } else {
1928     // Convert rs to a FP value in fd.
1929     DCHECK(!fd.is(scratch));
1930     DCHECK(!rs.is(at));
1931 
1932     Label msb_clear, conversion_done;
1933     // For a value which is < 2^31, regard it as a signed positive word.
1934     Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
1935     mtc1(rs, fd);
1936 
1937     li(at, 0x41F00000);  // FP value: 2^32.
1938 
1939     // For unsigned inputs > 2^31, we convert to double as a signed int32,
1940     // then add 2^32 to move it back to an unsigned value in range 2^31..2^32-1.
1941     mtc1(zero_reg, scratch);
1942     Mthc1(at, scratch);
1943 
1944     cvt_d_w(fd, fd);
1945 
1946     Branch(USE_DELAY_SLOT, &conversion_done);
1947     add_d(fd, fd, scratch);
1948 
1949     bind(&msb_clear);
1950     cvt_d_w(fd, fd);
1951 
1952     bind(&conversion_done);
1953   }
1954 }
1955 
1956 
1957 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1958                                 FPURegister fs,
1959                                 FPURegister scratch) {
1960   Trunc_uw_d(fs, t8, scratch);
1961   mtc1(t8, fd);
1962 }
1963 
1964 void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
1965                                 FPURegister scratch) {
1966   Trunc_uw_s(fs, t8, scratch);
1967   mtc1(t8, fd);
1968 }
1969 
1970 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1971   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1972     Mfhc1(t8, fs);
1973     trunc_w_d(fd, fs);
1974     Mthc1(t8, fs);
1975   } else {
1976     trunc_w_d(fd, fs);
1977   }
1978 }
1979 
1980 
1981 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1982   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1983     Mfhc1(t8, fs);
1984     round_w_d(fd, fs);
1985     Mthc1(t8, fs);
1986   } else {
1987     round_w_d(fd, fs);
1988   }
1989 }
1990 
1991 
1992 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1993   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1994     Mfhc1(t8, fs);
1995     floor_w_d(fd, fs);
1996     Mthc1(t8, fs);
1997   } else {
1998     floor_w_d(fd, fs);
1999   }
2000 }
2001 
2002 
2003 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
2004   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
2005     Mfhc1(t8, fs);
2006     ceil_w_d(fd, fs);
2007     Mthc1(t8, fs);
2008   } else {
2009     ceil_w_d(fd, fs);
2010   }
2011 }
2012 
2013 
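// Trunc_uw_d truncates a double to an unsigned 32-bit integer in rs. Values
// below 2^31 are truncated directly; larger values are first reduced by
// 2^31 (double high word 0x41E00000), truncated as a signed int32, and then
// bit 31 is set again with the Or below. Illustrative example:
// 3000000000.0 -> 3000000000 - 2^31 = 852516352 -> trunc ->
// 852516352 | 0x80000000 = 3000000000.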
2014 void MacroAssembler::Trunc_uw_d(FPURegister fd,
2015                                 Register rs,
2016                                 FPURegister scratch) {
2017   DCHECK(!fd.is(scratch));
2018   DCHECK(!rs.is(at));
2019 
2020   // Load 2^31 into scratch as its double representation.
2021   li(at, 0x41E00000);
2022   mtc1(zero_reg, scratch);
2023   Mthc1(at, scratch);
2024   // Test if scratch > fd.
2025   // If fd < 2^31 we can convert it normally.
2026   Label simple_convert;
2027   BranchF(&simple_convert, NULL, lt, fd, scratch);
2028 
2029   // First we subtract 2^31 from fd, then trunc it to rs
2030   // and add 2^31 to rs.
2031   sub_d(scratch, fd, scratch);
2032   trunc_w_d(scratch, scratch);
2033   mfc1(rs, scratch);
2034   Or(rs, rs, 1 << 31);
2035 
2036   Label done;
2037   Branch(&done);
2038   // Simple conversion.
2039   bind(&simple_convert);
2040   trunc_w_d(scratch, fd);
2041   mfc1(rs, scratch);
2042 
2043   bind(&done);
2044 }
2045 
2046 void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
2047                                 FPURegister scratch) {
2048   DCHECK(!fd.is(scratch));
2049   DCHECK(!rs.is(at));
2050 
2051   // Load 2^31 into scratch as its float representation.
2052   li(at, 0x4F000000);
2053   mtc1(at, scratch);
2054   // Test if scratch > fd.
2055   // If fd < 2^31 we can convert it normally.
2056   Label simple_convert;
2057   BranchF32(&simple_convert, NULL, lt, fd, scratch);
2058 
2059   // First we subtract 2^31 from fd, then trunc it to rs
2060   // and add 2^31 to rs.
2061   sub_s(scratch, fd, scratch);
2062   trunc_w_s(scratch, scratch);
2063   mfc1(rs, scratch);
2064   Or(rs, rs, 1 << 31);
2065 
2066   Label done;
2067   Branch(&done);
2068   // Simple conversion.
2069   bind(&simple_convert);
2070   trunc_w_s(scratch, fd);
2071   mfc1(rs, scratch);
2072 
2073   bind(&done);
2074 }
2075 
2076 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
2077   if (IsFp32Mode()) {
2078     mtc1(rt, fs.high());
2079   } else {
2080     DCHECK(IsFp64Mode() || IsFpxxMode());
2081     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2082     mthc1(rt, fs);
2083   }
2084 }
2085 
2086 
2087 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
2088   if (IsFp32Mode()) {
2089     mfc1(rt, fs.high());
2090   } else {
2091     DCHECK(IsFp64Mode() || IsFpxxMode());
2092     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2093     mfhc1(rt, fs);
2094   }
2095 }
2096 
2097 
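// BranchFCommon implements the generic FP compare-and-branch. If a NaN
// label is given, an unordered compare is emitted first (c.un + bc1t/bc1f
// before r6, cmp.un + bc1nez/bc1eqz on r6); the requested condition is then
// handled by BranchShortF, with a negated-condition skip around BranchLong
// when the target is out of short-branch range.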
2098 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2099                                    Label* nan, Condition cond, FPURegister cmp1,
2100                                    FPURegister cmp2, BranchDelaySlot bd) {
2101   {
2102     BlockTrampolinePoolScope block_trampoline_pool(this);
2103     if (cond == al) {
2104       Branch(bd, target);
2105       return;
2106     }
2107 
2108     if (IsMipsArchVariant(kMips32r6)) {
2109       sizeField = sizeField == D ? L : W;
2110     }
2111     DCHECK(nan || target);
2112     // Check for unordered (NaN) cases.
2113     if (nan) {
2114       bool long_branch =
2115           nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
2116       if (!IsMipsArchVariant(kMips32r6)) {
2117         if (long_branch) {
2118           Label skip;
2119           c(UN, sizeField, cmp1, cmp2);
2120           bc1f(&skip);
2121           nop();
2122           BranchLong(nan, bd);
2123           bind(&skip);
2124         } else {
2125           c(UN, sizeField, cmp1, cmp2);
2126           bc1t(nan);
2127           if (bd == PROTECT) {
2128             nop();
2129           }
2130         }
2131       } else {
2132         // Use kDoubleCompareReg for the comparison result. It has to be
2133         // unavailable to the Lithium register allocator.
2134         DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2135         if (long_branch) {
2136           Label skip;
2137           cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2138           bc1eqz(&skip, kDoubleCompareReg);
2139           nop();
2140           BranchLong(nan, bd);
2141           bind(&skip);
2142         } else {
2143           cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2144           bc1nez(nan, kDoubleCompareReg);
2145           if (bd == PROTECT) {
2146             nop();
2147           }
2148         }
2149       }
2150     }
2151 
2152     if (target) {
2153       bool long_branch =
2154           target->is_bound() ? is_near(target) : is_trampoline_emitted();
2155       if (long_branch) {
2156         Label skip;
2157         Condition neg_cond = NegateFpuCondition(cond);
2158         BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2159         BranchLong(target, bd);
2160         bind(&skip);
2161       } else {
2162         BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2163       }
2164     }
2165   }
2166 }
2167 
2168 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2169                                   Condition cc, FPURegister cmp1,
2170                                   FPURegister cmp2, BranchDelaySlot bd) {
2171   if (!IsMipsArchVariant(kMips32r6)) {
2172     BlockTrampolinePoolScope block_trampoline_pool(this);
2173     if (target) {
2174       // Here NaN cases were either handled by this function or are assumed to
2175       // have been handled by the caller.
2176       switch (cc) {
2177         case lt:
2178           c(OLT, sizeField, cmp1, cmp2);
2179           bc1t(target);
2180           break;
2181         case ult:
2182           c(ULT, sizeField, cmp1, cmp2);
2183           bc1t(target);
2184           break;
2185         case gt:
2186           c(ULE, sizeField, cmp1, cmp2);
2187           bc1f(target);
2188           break;
2189         case ugt:
2190           c(OLE, sizeField, cmp1, cmp2);
2191           bc1f(target);
2192           break;
2193         case ge:
2194           c(ULT, sizeField, cmp1, cmp2);
2195           bc1f(target);
2196           break;
2197         case uge:
2198           c(OLT, sizeField, cmp1, cmp2);
2199           bc1f(target);
2200           break;
2201         case le:
2202           c(OLE, sizeField, cmp1, cmp2);
2203           bc1t(target);
2204           break;
2205         case ule:
2206           c(ULE, sizeField, cmp1, cmp2);
2207           bc1t(target);
2208           break;
2209         case eq:
2210           c(EQ, sizeField, cmp1, cmp2);
2211           bc1t(target);
2212           break;
2213         case ueq:
2214           c(UEQ, sizeField, cmp1, cmp2);
2215           bc1t(target);
2216           break;
2217         case ne:  // Unordered or not equal.
2218           c(EQ, sizeField, cmp1, cmp2);
2219           bc1f(target);
2220           break;
2221         case ogl:
2222           c(UEQ, sizeField, cmp1, cmp2);
2223           bc1f(target);
2224           break;
2225         default:
2226           CHECK(0);
2227       }
2228     }
2229   } else {
2230     BlockTrampolinePoolScope block_trampoline_pool(this);
2231     if (target) {
2232       // Here NaN cases were either handled by this function or are assumed to
2233       // have been handled by the caller.
2234       // Unsigned conditions are treated as their signed counterpart.
2235       // Use kDoubleCompareReg for the comparison result; it is
2236       // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
2237       DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2238       switch (cc) {
2239         case lt:
2240           cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2241           bc1nez(target, kDoubleCompareReg);
2242           break;
2243         case ult:
2244           cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2245           bc1nez(target, kDoubleCompareReg);
2246           break;
2247         case gt:
2248           cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2249           bc1eqz(target, kDoubleCompareReg);
2250           break;
2251         case ugt:
2252           cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2253           bc1eqz(target, kDoubleCompareReg);
2254           break;
2255         case ge:
2256           cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2257           bc1eqz(target, kDoubleCompareReg);
2258           break;
2259         case uge:
2260           cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2261           bc1eqz(target, kDoubleCompareReg);
2262           break;
2263         case le:
2264           cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2265           bc1nez(target, kDoubleCompareReg);
2266           break;
2267         case ule:
2268           cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2269           bc1nez(target, kDoubleCompareReg);
2270           break;
2271         case eq:
2272           cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2273           bc1nez(target, kDoubleCompareReg);
2274           break;
2275         case ueq:
2276           cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2277           bc1nez(target, kDoubleCompareReg);
2278           break;
2279         case ne:
2280           cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2281           bc1eqz(target, kDoubleCompareReg);
2282           break;
2283         case ogl:
2284           cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2285           bc1eqz(target, kDoubleCompareReg);
2286           break;
2287         default:
2288           CHECK(0);
2289       }
2290     }
2291   }
2292   if (bd == PROTECT) {
2293     nop();
2294   }
2295 }
2296 
2297 
2298 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2299   if (IsFp32Mode()) {
2300     mtc1(src_low, dst);
2301   } else {
2302     DCHECK(IsFp64Mode() || IsFpxxMode());
2303     DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2304     DCHECK(!src_low.is(at));
2305     mfhc1(at, dst);
2306     mtc1(src_low, dst);
2307     mthc1(at, dst);
2308   }
2309 }
2310 
2311 
2312 void MacroAssembler::Move(FPURegister dst, float imm) {
2313   li(at, Operand(bit_cast<int32_t>(imm)));
2314   mtc1(at, dst);
2315 }
2316 
2317 
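// Move(FPURegister, double) materializes a double constant. +0.0 and -0.0
// are served from kDoubleRegZero once it has been initialized; otherwise
// the constant is split into its two 32-bit halves and moved into the low
// and high words of the destination register (or register pair) separately,
// using zero_reg directly when a half is zero.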
2318 void MacroAssembler::Move(FPURegister dst, double imm) {
2319   static const DoubleRepresentation minus_zero(-0.0);
2320   static const DoubleRepresentation zero(0.0);
2321   DoubleRepresentation value_rep(imm);
2322   // Handle special values first.
2323   if (value_rep == zero && has_double_zero_reg_set_) {
2324     mov_d(dst, kDoubleRegZero);
2325   } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
2326     neg_d(dst, kDoubleRegZero);
2327   } else {
2328     uint32_t lo, hi;
2329     DoubleAsTwoUInt32(imm, &lo, &hi);
2330     // Move the low part of the double into the lower half of the corresponding
2331     // FPU register (or register pair).
2332     if (lo != 0) {
2333       li(at, Operand(lo));
2334       mtc1(at, dst);
2335     } else {
2336       mtc1(zero_reg, dst);
2337     }
2338     // Move the high part of the double into the upper half of the corresponding
2339     // FPU register (or register pair).
2340     if (hi != 0) {
2341       li(at, Operand(hi));
2342       Mthc1(at, dst);
2343     } else {
2344       Mthc1(zero_reg, dst);
2345     }
2346     if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2347   }
2348 }
2349 
2350 
2351 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2352   if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2353     Label done;
2354     Branch(&done, ne, rt, Operand(zero_reg));
2355     mov(rd, rs);
2356     bind(&done);
2357   } else {
2358     movz(rd, rs, rt);
2359   }
2360 }
2361 
2362 
2363 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2364   if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
2365     Label done;
2366     Branch(&done, eq, rt, Operand(zero_reg));
2367     mov(rd, rs);
2368     bind(&done);
2369   } else {
2370     movn(rd, rs, rt);
2371   }
2372 }
2373 
2374 
2375 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2376   if (IsMipsArchVariant(kLoongson)) {
2377     // Tests an FP condition code and then conditionally moves rs to rd.
2378     // We do not currently use any FPU cc bit other than bit 0.
2379     DCHECK(cc == 0);
2380     DCHECK(!(rs.is(t8) || rd.is(t8)));
2381     Label done;
2382     Register scratch = t8;
2383     // For testing purposes we need to fetch the content of the FCSR register
2384     // and then test its cc (floating point condition code) bit (for cc = 0,
2385     // this is bit 23 of the FCSR).
2386     cfc1(scratch, FCSR);
2387     // For the MIPS I, II and III architectures, the contents of scratch are
2388     // UNPREDICTABLE for the instruction immediately following CFC1.
2389     nop();
2390     srl(scratch, scratch, 16);
2391     andi(scratch, scratch, 0x0080);
2392     Branch(&done, eq, scratch, Operand(zero_reg));
2393     mov(rd, rs);
2394     bind(&done);
2395   } else {
2396     movt(rd, rs, cc);
2397   }
2398 }
2399 
2400 
2401 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2402   if (IsMipsArchVariant(kLoongson)) {
2403     // Tests an FP condition code and then conditionally moves rs to rd.
2404     // We do not currently use any FPU cc bit other than bit 0.
2405     DCHECK(cc == 0);
2406     DCHECK(!(rs.is(t8) || rd.is(t8)));
2407     Label done;
2408     Register scratch = t8;
2409     // For testing purposes we need to fetch the content of the FCSR register
2410     // and then test its cc (floating point condition code) bit (for cc = 0,
2411     // this is bit 23 of the FCSR).
2412     cfc1(scratch, FCSR);
2413     // For the MIPS I, II and III architectures, the contents of scratch are
2414     // UNPREDICTABLE for the instruction immediately following CFC1.
2415     nop();
2416     srl(scratch, scratch, 16);
2417     andi(scratch, scratch, 0x0080);
2418     Branch(&done, ne, scratch, Operand(zero_reg));
2419     mov(rd, rs);
2420     bind(&done);
2421   } else {
2422     movf(rd, rs, cc);
2423   }
2424 }
2425 
2426 #define __ masm->
2427 
2428 static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2429                          FPURegister src1, FPURegister src2, Label* equal) {
2430   if (src1.is(src2)) {
2431     __ Move(dst, src1);
2432     return true;
2433   }
2434 
2435   Label other, compare_not_equal;
2436   FPURegister left, right;
2437   if (kind == MaxMinKind::kMin) {
2438     left = src1;
2439     right = src2;
2440   } else {
2441     left = src2;
2442     right = src1;
2443   }
2444 
2445   __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
2446   // Left and right hand side are equal, check for -0 vs. +0.
2447   __ FmoveHigh(t8, src1);
2448   __ Branch(&other, eq, t8, Operand(0x80000000));
2449   __ Move_d(dst, right);
2450   __ Branch(equal);
2451   __ bind(&other);
2452   __ Move_d(dst, left);
2453   __ Branch(equal);
2454   __ bind(&compare_not_equal);
2455   return false;
2456 }
2457 
2458 static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2459                          FPURegister src1, FPURegister src2, Label* equal) {
2460   if (src1.is(src2)) {
2461     __ Move(dst, src1);
2462     return true;
2463   }
2464 
2465   Label other, compare_not_equal;
2466   FPURegister left, right;
2467   if (kind == MaxMinKind::kMin) {
2468     left = src1;
2469     right = src2;
2470   } else {
2471     left = src2;
2472     right = src1;
2473   }
2474 
2475   __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
2476   // Left and right hand side are equal, check for -0 vs. +0.
2477   __ FmoveLow(t8, src1);
2478   __ Branch(&other, eq, t8, Operand(0x80000000));
2479   __ Move_s(dst, right);
2480   __ Branch(equal);
2481   __ bind(&other);
2482   __ Move_s(dst, left);
2483   __ Branch(equal);
2484   __ bind(&compare_not_equal);
2485   return false;
2486 }
2487 
2488 #undef __
2489 
2490 void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
2491                                    FPURegister src2, Label* nan) {
2492   if (nan) {
2493     BranchF64(nullptr, nan, eq, src1, src2);
2494   }
2495   if (IsMipsArchVariant(kMips32r6)) {
2496     min_d(dst, src1, src2);
2497   } else {
2498     Label skip;
2499     if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2500       if (dst.is(src1)) {
2501         BranchF64(&skip, nullptr, le, src1, src2);
2502         Move_d(dst, src2);
2503       } else if (dst.is(src2)) {
2504         BranchF64(&skip, nullptr, ge, src1, src2);
2505         Move_d(dst, src1);
2506       } else {
2507         Label right;
2508         BranchF64(&right, nullptr, gt, src1, src2);
2509         Move_d(dst, src1);
2510         Branch(&skip);
2511         bind(&right);
2512         Move_d(dst, src2);
2513       }
2514     }
2515     bind(&skip);
2516   }
2517 }
2518 
2519 void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
2520                                    FPURegister src2, Label* nan) {
2521   if (nan) {
2522     BranchF64(nullptr, nan, eq, src1, src2);
2523   }
2524   if (IsMipsArchVariant(kMips32r6)) {
2525     max_d(dst, src1, src2);
2526   } else {
2527     Label skip;
2528     if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2529       if (dst.is(src1)) {
2530         BranchF64(&skip, nullptr, ge, src1, src2);
2531         Move_d(dst, src2);
2532       } else if (dst.is(src2)) {
2533         BranchF64(&skip, nullptr, le, src1, src2);
2534         Move_d(dst, src1);
2535       } else {
2536         Label right;
2537         BranchF64(&right, nullptr, lt, src1, src2);
2538         Move_d(dst, src1);
2539         Branch(&skip);
2540         bind(&right);
2541         Move_d(dst, src2);
2542       }
2543     }
2544     bind(&skip);
2545   }
2546 }
2547 
2548 void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
2549                                    FPURegister src2, Label* nan) {
2550   if (nan) {
2551     BranchF32(nullptr, nan, eq, src1, src2);
2552   }
2553   if (IsMipsArchVariant(kMips32r6)) {
2554     min_s(dst, src1, src2);
2555   } else {
2556     Label skip;
2557     if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2558       if (dst.is(src1)) {
2559         BranchF32(&skip, nullptr, le, src1, src2);
2560         Move_s(dst, src2);
2561       } else if (dst.is(src2)) {
2562         BranchF32(&skip, nullptr, ge, src1, src2);
2563         Move_s(dst, src1);
2564       } else {
2565         Label right;
2566         BranchF32(&right, nullptr, gt, src1, src2);
2567         Move_s(dst, src1);
2568         Branch(&skip);
2569         bind(&right);
2570         Move_s(dst, src2);
2571       }
2572     }
2573     bind(&skip);
2574   }
2575 }
2576 
2577 void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
2578                                    FPURegister src2, Label* nan) {
2579   if (nan) {
2580     BranchF32(nullptr, nan, eq, src1, src2);
2581   }
2582   if (IsMipsArchVariant(kMips32r6)) {
2583     max_s(dst, src1, src2);
2584   } else {
2585     Label skip;
2586     if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2587       if (dst.is(src1)) {
2588         BranchF32(&skip, nullptr, ge, src1, src2);
2589         Move_s(dst, src2);
2590       } else if (dst.is(src2)) {
2591         BranchF32(&skip, nullptr, le, src1, src2);
2592         Move_s(dst, src1);
2593       } else {
2594         Label right;
2595         BranchF32(&right, nullptr, lt, src1, src2);
2596         Move_s(dst, src1);
2597         Branch(&skip);
2598         bind(&right);
2599         Move_s(dst, src2);
2600       }
2601     }
2602     bind(&skip);
2603   }
2604 }
2605 
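// Clz counts the leading zero bits of rs. On cores without the clz
// instruction (Loongson) it is emulated by scanning with a mask that starts
// at bit 31 and is shifted right by one position per iteration until a set
// bit is found, incrementing rd for every zero bit that is skipped.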
2606 void MacroAssembler::Clz(Register rd, Register rs) {
2607   if (IsMipsArchVariant(kLoongson)) {
2608     DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
2609     Register mask = t8;
2610     Register scratch = t9;
2611     Label loop, end;
2612     mov(at, rs);
2613     mov(rd, zero_reg);
2614     lui(mask, 0x8000);
2615     bind(&loop);
2616     and_(scratch, at, mask);
2617     Branch(&end, ne, scratch, Operand(zero_reg));
2618     addiu(rd, rd, 1);
2619     Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
2620     srl(mask, mask, 1);
2621     bind(&end);
2622   } else {
2623     clz(rd, rs);
2624   }
2625 }
2626 
2627 
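// EmitFPUTruncate converts double_input to a 32-bit integer in 'result'
// using the requested rounding mode. It first tries an exact round trip via
// cvt.w.d / cvt.d.w and finishes with except_flag == 0 if the value was
// exactly representable; otherwise it clears the FCSR, performs the
// rounding, and copies the accumulated FCSR exception bits (optionally
// ignoring the inexact flag) into except_flag for the caller to test.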
2628 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2629                                      Register result,
2630                                      DoubleRegister double_input,
2631                                      Register scratch,
2632                                      DoubleRegister double_scratch,
2633                                      Register except_flag,
2634                                      CheckForInexactConversion check_inexact) {
2635   DCHECK(!result.is(scratch));
2636   DCHECK(!double_input.is(double_scratch));
2637   DCHECK(!except_flag.is(scratch));
2638 
2639   Label done;
2640 
2641   // Clear the except flag (0 = no exception)
2642   mov(except_flag, zero_reg);
2643 
2644   // Test for values that can be exactly represented as a signed 32-bit integer.
2645   cvt_w_d(double_scratch, double_input);
2646   mfc1(result, double_scratch);
2647   cvt_d_w(double_scratch, double_scratch);
2648   BranchF(&done, NULL, eq, double_input, double_scratch);
2649 
2650   int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
2651 
2652   if (check_inexact == kDontCheckForInexactConversion) {
2653     // Ignore inexact exceptions.
2654     except_mask &= ~kFCSRInexactFlagMask;
2655   }
2656 
2657   // Save FCSR.
2658   cfc1(scratch, FCSR);
2659   // Disable FPU exceptions.
2660   ctc1(zero_reg, FCSR);
2661 
2662   // Do operation based on rounding mode.
2663   switch (rounding_mode) {
2664     case kRoundToNearest:
2665       Round_w_d(double_scratch, double_input);
2666       break;
2667     case kRoundToZero:
2668       Trunc_w_d(double_scratch, double_input);
2669       break;
2670     case kRoundToPlusInf:
2671       Ceil_w_d(double_scratch, double_input);
2672       break;
2673     case kRoundToMinusInf:
2674       Floor_w_d(double_scratch, double_input);
2675       break;
2676   }  // End of switch-statement.
2677 
2678   // Retrieve FCSR.
2679   cfc1(except_flag, FCSR);
2680   // Restore FCSR.
2681   ctc1(scratch, FCSR);
2682   // Move the converted value into the result register.
2683   mfc1(result, double_scratch);
2684 
2685   // Check for fpu exceptions.
2686   And(except_flag, except_flag, Operand(except_mask));
2687 
2688   bind(&done);
2689 }
2690 
2691 
2692 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2693                                                 DoubleRegister double_input,
2694                                                 Label* done) {
2695   DoubleRegister single_scratch = kLithiumScratchDouble.low();
2696   Register scratch = at;
2697   Register scratch2 = t9;
2698 
2699   // Clear cumulative exception flags and save the FCSR.
2700   cfc1(scratch2, FCSR);
2701   ctc1(zero_reg, FCSR);
2702   // Try a conversion to a signed integer.
2703   trunc_w_d(single_scratch, double_input);
2704   mfc1(result, single_scratch);
2705   // Retrieve and restore the FCSR.
2706   cfc1(scratch, FCSR);
2707   ctc1(scratch2, FCSR);
2708   // Check for overflow and NaNs.
2709   And(scratch,
2710       scratch,
2711       kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2712   // If we had no exceptions we are done.
2713   Branch(done, eq, scratch, Operand(zero_reg));
2714 }
2715 
2716 
2717 void MacroAssembler::TruncateDoubleToI(Register result,
2718                                        DoubleRegister double_input) {
2719   Label done;
2720 
2721   TryInlineTruncateDoubleToI(result, double_input, &done);
2722 
2723   // If we fell through, the inline version didn't succeed - call the stub instead.
2724   push(ra);
2725   Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2726   sdc1(double_input, MemOperand(sp, 0));
2727 
2728   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2729   CallStub(&stub);
2730 
2731   Addu(sp, sp, Operand(kDoubleSize));
2732   pop(ra);
2733 
2734   bind(&done);
2735 }
2736 
2737 
2738 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2739   Label done;
2740   DoubleRegister double_scratch = f12;
2741   DCHECK(!result.is(object));
2742 
2743   ldc1(double_scratch,
2744        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2745   TryInlineTruncateDoubleToI(result, double_scratch, &done);
2746 
2747   // If we fell through, the inline version didn't succeed - call the stub instead.
2748   push(ra);
2749   DoubleToIStub stub(isolate(),
2750                      object,
2751                      result,
2752                      HeapNumber::kValueOffset - kHeapObjectTag,
2753                      true,
2754                      true);
2755   CallStub(&stub);
2756   pop(ra);
2757 
2758   bind(&done);
2759 }
2760 
2761 
2762 void MacroAssembler::TruncateNumberToI(Register object,
2763                                        Register result,
2764                                        Register heap_number_map,
2765                                        Register scratch,
2766                                        Label* not_number) {
2767   Label done;
2768   DCHECK(!result.is(object));
2769 
2770   UntagAndJumpIfSmi(result, object, &done);
2771   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2772   TruncateHeapNumberToI(result, object);
2773 
2774   bind(&done);
2775 }
2776 
2777 
2778 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2779                                          Register src,
2780                                          int num_least_bits) {
2781   Ext(dst, src, kSmiTagSize, num_least_bits);
2782 }
2783 
2784 
2785 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2786                                            Register src,
2787                                            int num_least_bits) {
2788   And(dst, src, Operand((1 << num_least_bits) - 1));
2789 }
2790 
2791 
2792 // Emulated conditional branches do not emit a nop in the branch delay slot.
2793 //
2794 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2795 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
2796     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
2797     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2798 
2799 
2800 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2801   DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
2802   BranchShort(offset, bdslot);
2803 }
2804 
2805 
2806 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2807                             const Operand& rt, BranchDelaySlot bdslot) {
2808   bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2809   DCHECK(is_near);
2810   USE(is_near);
2811 }
2812 
2813 
2814 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2815   if (L->is_bound()) {
2816     if (is_near_branch(L)) {
2817       BranchShort(L, bdslot);
2818     } else {
2819       BranchLong(L, bdslot);
2820     }
2821   } else {
2822     if (is_trampoline_emitted()) {
2823       BranchLong(L, bdslot);
2824     } else {
2825       BranchShort(L, bdslot);
2826     }
2827   }
2828 }
2829 
2830 
2831 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2832                             const Operand& rt,
2833                             BranchDelaySlot bdslot) {
2834   if (L->is_bound()) {
2835     if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2836       if (cond != cc_always) {
2837         Label skip;
2838         Condition neg_cond = NegateCondition(cond);
2839         BranchShort(&skip, neg_cond, rs, rt);
2840         BranchLong(L, bdslot);
2841         bind(&skip);
2842       } else {
2843         BranchLong(L, bdslot);
2844       }
2845     }
2846   } else {
2847     if (is_trampoline_emitted()) {
2848       if (cond != cc_always) {
2849         Label skip;
2850         Condition neg_cond = NegateCondition(cond);
2851         BranchShort(&skip, neg_cond, rs, rt);
2852         BranchLong(L, bdslot);
2853         bind(&skip);
2854       } else {
2855         BranchLong(L, bdslot);
2856       }
2857     } else {
2858       BranchShort(L, cond, rs, rt, bdslot);
2859     }
2860   }
2861 }
2862 
2863 
2864 void MacroAssembler::Branch(Label* L,
2865                             Condition cond,
2866                             Register rs,
2867                             Heap::RootListIndex index,
2868                             BranchDelaySlot bdslot) {
2869   LoadRoot(at, index);
2870   Branch(L, cond, rs, Operand(at), bdslot);
2871 }
2872 
2873 
2874 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2875                                        BranchDelaySlot bdslot) {
2876   DCHECK(L == nullptr || offset == 0);
2877   offset = GetOffset(offset, L, OffsetSize::kOffset16);
2878   b(offset);
2879 
2880   // Emit a nop in the branch delay slot if required.
2881   if (bdslot == PROTECT)
2882     nop();
2883 }
2884 
2885 
2886 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2887   DCHECK(L == nullptr || offset == 0);
2888   offset = GetOffset(offset, L, OffsetSize::kOffset26);
2889   bc(offset);
2890 }
2891 
2892 
2893 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2894   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2895     DCHECK(is_int26(offset));
2896     BranchShortHelperR6(offset, nullptr);
2897   } else {
2898     DCHECK(is_int16(offset));
2899     BranchShortHelper(offset, nullptr, bdslot);
2900   }
2901 }
2902 
2903 
2904 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2905   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2906     BranchShortHelperR6(0, L);
2907   } else {
2908     BranchShortHelper(0, L, bdslot);
2909   }
2910 }
2911 
2912 
2913 static inline bool IsZero(const Operand& rt) {
2914   if (rt.is_reg()) {
2915     return rt.rm().is(zero_reg);
2916   } else {
2917     return rt.immediate() == 0;
2918   }
2919 }
2920 
2921 
2922 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2923   if (L) {
2924     offset = branch_offset_helper(L, bits) >> 2;
2925   } else {
2926     DCHECK(is_intn(offset, bits));
2927   }
2928   return offset;
2929 }
2930 
2931 
2932 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2933                                                Register scratch) {
2934   Register r2 = no_reg;
2935   if (rt.is_reg()) {
2936     r2 = rt.rm_;
2937   } else {
2938     r2 = scratch;
2939     li(r2, rt);
2940   }
2941 
2942   return r2;
2943 }
2944 
2945 
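// The r6 helper picks the widest branch encoding that the operands allow:
// a 26-bit compact bc for always-taken branches, 21-bit beqzc/bnezc when
// the right-hand side is zero, and 16-bit two-register compact compares
// (beqc, bnec, bltc, ...) otherwise. When both operands are the same
// register the pre-r6 beq/bne form is kept so the branch stays patchable.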
2946 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2947                                          Condition cond, Register rs,
2948                                          const Operand& rt) {
2949   DCHECK(L == nullptr || offset == 0);
2950   Register scratch = rs.is(at) ? t8 : at;
2951   OffsetSize bits = OffsetSize::kOffset16;
2952 
2953   // Be careful to always use shifted_branch_offset only just before the
2954   // branch instruction, as the location will be remembered for patching the
2955   // target.
2956   {
2957     BlockTrampolinePoolScope block_trampoline_pool(this);
2958     switch (cond) {
2959       case cc_always:
2960         bits = OffsetSize::kOffset26;
2961         if (!is_near(L, bits)) return false;
2962         offset = GetOffset(offset, L, bits);
2963         bc(offset);
2964         break;
2965       case eq:
2966         if (rs.code() == rt.rm_.reg_code) {
2967           // Pre R6 beq is used here to make the code patchable. Otherwise bc
2968           // should be used which has no condition field so is not patchable.
2969           bits = OffsetSize::kOffset16;
2970           if (!is_near(L, bits)) return false;
2971           scratch = GetRtAsRegisterHelper(rt, scratch);
2972           offset = GetOffset(offset, L, bits);
2973           beq(rs, scratch, offset);
2974           nop();
2975         } else if (IsZero(rt)) {
2976           bits = OffsetSize::kOffset21;
2977           if (!is_near(L, bits)) return false;
2978           offset = GetOffset(offset, L, bits);
2979           beqzc(rs, offset);
2980         } else {
2981           // We don't want any other register but scratch clobbered.
2982           bits = OffsetSize::kOffset16;
2983           if (!is_near(L, bits)) return false;
2984           scratch = GetRtAsRegisterHelper(rt, scratch);
2985           offset = GetOffset(offset, L, bits);
2986           beqc(rs, scratch, offset);
2987         }
2988         break;
2989       case ne:
2990         if (rs.code() == rt.rm_.reg_code) {
2991           // Pre R6 bne is used here to make the code patchable. Otherwise we
2992           // should not generate any instruction.
2993           bits = OffsetSize::kOffset16;
2994           if (!is_near(L, bits)) return false;
2995           scratch = GetRtAsRegisterHelper(rt, scratch);
2996           offset = GetOffset(offset, L, bits);
2997           bne(rs, scratch, offset);
2998           nop();
2999         } else if (IsZero(rt)) {
3000           bits = OffsetSize::kOffset21;
3001           if (!is_near(L, bits)) return false;
3002           offset = GetOffset(offset, L, bits);
3003           bnezc(rs, offset);
3004         } else {
3005           // We don't want any other register but scratch clobbered.
3006           bits = OffsetSize::kOffset16;
3007           if (!is_near(L, bits)) return false;
3008           scratch = GetRtAsRegisterHelper(rt, scratch);
3009           offset = GetOffset(offset, L, bits);
3010           bnec(rs, scratch, offset);
3011         }
3012         break;
3013 
3014       // Signed comparison.
3015       case greater:
3016         // rs > rt
3017         if (rs.code() == rt.rm_.reg_code) {
3018           break;  // No code needs to be emitted.
3019         } else if (rs.is(zero_reg)) {
3020           bits = OffsetSize::kOffset16;
3021           if (!is_near(L, bits)) return false;
3022           scratch = GetRtAsRegisterHelper(rt, scratch);
3023           offset = GetOffset(offset, L, bits);
3024           bltzc(scratch, offset);
3025         } else if (IsZero(rt)) {
3026           bits = OffsetSize::kOffset16;
3027           if (!is_near(L, bits)) return false;
3028           offset = GetOffset(offset, L, bits);
3029           bgtzc(rs, offset);
3030         } else {
3031           bits = OffsetSize::kOffset16;
3032           if (!is_near(L, bits)) return false;
3033           scratch = GetRtAsRegisterHelper(rt, scratch);
3034           DCHECK(!rs.is(scratch));
3035           offset = GetOffset(offset, L, bits);
3036           bltc(scratch, rs, offset);
3037         }
3038         break;
3039       case greater_equal:
3040         // rs >= rt
3041         if (rs.code() == rt.rm_.reg_code) {
3042           bits = OffsetSize::kOffset26;
3043           if (!is_near(L, bits)) return false;
3044           offset = GetOffset(offset, L, bits);
3045           bc(offset);
3046         } else if (rs.is(zero_reg)) {
3047           bits = OffsetSize::kOffset16;
3048           if (!is_near(L, bits)) return false;
3049           scratch = GetRtAsRegisterHelper(rt, scratch);
3050           offset = GetOffset(offset, L, bits);
3051           blezc(scratch, offset);
3052         } else if (IsZero(rt)) {
3053           bits = OffsetSize::kOffset16;
3054           if (!is_near(L, bits)) return false;
3055           offset = GetOffset(offset, L, bits);
3056           bgezc(rs, offset);
3057         } else {
3058           bits = OffsetSize::kOffset16;
3059           if (!is_near(L, bits)) return false;
3060           scratch = GetRtAsRegisterHelper(rt, scratch);
3061           DCHECK(!rs.is(scratch));
3062           offset = GetOffset(offset, L, bits);
3063           bgec(rs, scratch, offset);
3064         }
3065         break;
3066       case less:
3067         // rs < rt
3068         if (rs.code() == rt.rm_.reg_code) {
3069           break;  // No code needs to be emitted.
3070         } else if (rs.is(zero_reg)) {
3071           bits = OffsetSize::kOffset16;
3072           if (!is_near(L, bits)) return false;
3073           scratch = GetRtAsRegisterHelper(rt, scratch);
3074           offset = GetOffset(offset, L, bits);
3075           bgtzc(scratch, offset);
3076         } else if (IsZero(rt)) {
3077           bits = OffsetSize::kOffset16;
3078           if (!is_near(L, bits)) return false;
3079           offset = GetOffset(offset, L, bits);
3080           bltzc(rs, offset);
3081         } else {
3082           bits = OffsetSize::kOffset16;
3083           if (!is_near(L, bits)) return false;
3084           scratch = GetRtAsRegisterHelper(rt, scratch);
3085           DCHECK(!rs.is(scratch));
3086           offset = GetOffset(offset, L, bits);
3087           bltc(rs, scratch, offset);
3088         }
3089         break;
3090       case less_equal:
3091         // rs <= rt
3092         if (rs.code() == rt.rm_.reg_code) {
3093           bits = OffsetSize::kOffset26;
3094           if (!is_near(L, bits)) return false;
3095           offset = GetOffset(offset, L, bits);
3096           bc(offset);
3097         } else if (rs.is(zero_reg)) {
3098           bits = OffsetSize::kOffset16;
3099           if (!is_near(L, bits)) return false;
3100           scratch = GetRtAsRegisterHelper(rt, scratch);
3101           offset = GetOffset(offset, L, bits);
3102           bgezc(scratch, offset);
3103         } else if (IsZero(rt)) {
3104           bits = OffsetSize::kOffset16;
3105           if (!is_near(L, bits)) return false;
3106           offset = GetOffset(offset, L, bits);
3107           blezc(rs, offset);
3108         } else {
3109           bits = OffsetSize::kOffset16;
3110           if (!is_near(L, bits)) return false;
3111           scratch = GetRtAsRegisterHelper(rt, scratch);
3112           DCHECK(!rs.is(scratch));
3113           offset = GetOffset(offset, L, bits);
3114           bgec(scratch, rs, offset);
3115         }
3116         break;
3117 
3118       // Unsigned comparison.
3119       case Ugreater:
3120         // rs > rt
3121         if (rs.code() == rt.rm_.reg_code) {
3122           break;  // No code needs to be emitted.
3123         } else if (rs.is(zero_reg)) {
3124           bits = OffsetSize::kOffset21;
3125           if (!is_near(L, bits)) return false;
3126           scratch = GetRtAsRegisterHelper(rt, scratch);
3127           offset = GetOffset(offset, L, bits);
3128           bnezc(scratch, offset);
3129         } else if (IsZero(rt)) {
3130           bits = OffsetSize::kOffset21;
3131           if (!is_near(L, bits)) return false;
3132           offset = GetOffset(offset, L, bits);
3133           bnezc(rs, offset);
3134         } else {
3135           bits = OffsetSize::kOffset16;
3136           if (!is_near(L, bits)) return false;
3137           scratch = GetRtAsRegisterHelper(rt, scratch);
3138           DCHECK(!rs.is(scratch));
3139           offset = GetOffset(offset, L, bits);
3140           bltuc(scratch, rs, offset);
3141         }
3142         break;
3143       case Ugreater_equal:
3144         // rs >= rt
3145         if (rs.code() == rt.rm_.reg_code) {
3146           bits = OffsetSize::kOffset26;
3147           if (!is_near(L, bits)) return false;
3148           offset = GetOffset(offset, L, bits);
3149           bc(offset);
3150         } else if (rs.is(zero_reg)) {
3151           bits = OffsetSize::kOffset21;
3152           if (!is_near(L, bits)) return false;
3153           scratch = GetRtAsRegisterHelper(rt, scratch);
3154           offset = GetOffset(offset, L, bits);
3155           beqzc(scratch, offset);
3156         } else if (IsZero(rt)) {
3157           bits = OffsetSize::kOffset26;
3158           if (!is_near(L, bits)) return false;
3159           offset = GetOffset(offset, L, bits);
3160           bc(offset);
3161         } else {
3162           bits = OffsetSize::kOffset16;
3163           if (!is_near(L, bits)) return false;
3164           scratch = GetRtAsRegisterHelper(rt, scratch);
3165           DCHECK(!rs.is(scratch));
3166           offset = GetOffset(offset, L, bits);
3167           bgeuc(rs, scratch, offset);
3168         }
3169         break;
3170       case Uless:
3171         // rs < rt
3172         if (rs.code() == rt.rm_.reg_code) {
3173           break;  // No code needs to be emitted.
3174         } else if (rs.is(zero_reg)) {
3175           bits = OffsetSize::kOffset21;
3176           if (!is_near(L, bits)) return false;
3177           scratch = GetRtAsRegisterHelper(rt, scratch);
3178           offset = GetOffset(offset, L, bits);
3179           bnezc(scratch, offset);
3180         } else if (IsZero(rt)) {
3181           break;  // No code needs to be emitted.
3182         } else {
3183           bits = OffsetSize::kOffset16;
3184           if (!is_near(L, bits)) return false;
3185           scratch = GetRtAsRegisterHelper(rt, scratch);
3186           DCHECK(!rs.is(scratch));
3187           offset = GetOffset(offset, L, bits);
3188           bltuc(rs, scratch, offset);
3189         }
3190         break;
3191       case Uless_equal:
3192         // rs <= rt
3193         if (rs.code() == rt.rm_.reg_code) {
3194           bits = OffsetSize::kOffset26;
3195           if (!is_near(L, bits)) return false;
3196           offset = GetOffset(offset, L, bits);
3197           bc(offset);
3198         } else if (rs.is(zero_reg)) {
3199           bits = OffsetSize::kOffset26;
3200           if (!is_near(L, bits)) return false;
3201           scratch = GetRtAsRegisterHelper(rt, scratch);
3202           offset = GetOffset(offset, L, bits);
3203           bc(offset);
3204         } else if (IsZero(rt)) {
3205           bits = OffsetSize::kOffset21;
3206           if (!is_near(L, bits)) return false;
3207           offset = GetOffset(offset, L, bits);
3208           beqzc(rs, offset);
3209         } else {
3210           bits = OffsetSize::kOffset16;
3211           if (!is_near(L, bits)) return false;
3212           scratch = GetRtAsRegisterHelper(rt, scratch);
3213           DCHECK(!rs.is(scratch));
3214           offset = GetOffset(offset, L, bits);
3215           bgeuc(scratch, rs, offset);
3216         }
3217         break;
3218       default:
3219         UNREACHABLE();
3220     }
3221   }
3222   CheckTrampolinePoolQuick(1);
3223   return true;
3224 }
3225 
3226 
3227 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3228                                        Register rs, const Operand& rt,
3229                                        BranchDelaySlot bdslot) {
3230   DCHECK(L == nullptr || offset == 0);
3231   if (!is_near(L, OffsetSize::kOffset16)) return false;
3232 
3233   Register scratch = at;
3234   int32_t offset32;
3235 
3236   // Be careful to always use shifted_branch_offset only just before the
3237   // branch instruction, as the location will be remembered for patching the
3238   // target.
3239   {
3240     BlockTrampolinePoolScope block_trampoline_pool(this);
3241     switch (cond) {
3242       case cc_always:
3243         offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3244         b(offset32);
3245         break;
3246       case eq:
3247         if (IsZero(rt)) {
3248           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3249           beq(rs, zero_reg, offset32);
3250         } else {
3251           // We don't want any other register but scratch clobbered.
3252           scratch = GetRtAsRegisterHelper(rt, scratch);
3253           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3254           beq(rs, scratch, offset32);
3255         }
3256         break;
3257       case ne:
3258         if (IsZero(rt)) {
3259           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3260           bne(rs, zero_reg, offset32);
3261         } else {
3262           // We don't want any other register but scratch clobbered.
3263           scratch = GetRtAsRegisterHelper(rt, scratch);
3264           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3265           bne(rs, scratch, offset32);
3266         }
3267         break;
3268 
3269       // Signed comparison.
3270       case greater:
3271         if (IsZero(rt)) {
3272           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3273           bgtz(rs, offset32);
3274         } else {
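          // Compute (rt < rs) into scratch and branch when it is non-zero,
          // i.e. when rs > rt.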
3275           Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3276           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3277           bne(scratch, zero_reg, offset32);
3278         }
3279         break;
3280       case greater_equal:
3281         if (IsZero(rt)) {
3282           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3283           bgez(rs, offset32);
3284         } else {
3285           Slt(scratch, rs, rt);
3286           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3287           beq(scratch, zero_reg, offset32);
3288         }
3289         break;
3290       case less:
3291         if (IsZero(rt)) {
3292           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3293           bltz(rs, offset32);
3294         } else {
3295           Slt(scratch, rs, rt);
3296           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3297           bne(scratch, zero_reg, offset32);
3298         }
3299         break;
3300       case less_equal:
3301         if (IsZero(rt)) {
3302           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3303           blez(rs, offset32);
3304         } else {
3305           Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3306           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3307           beq(scratch, zero_reg, offset32);
3308         }
3309         break;
3310 
3311       // Unsigned comparison.
3312       case Ugreater:
3313         if (IsZero(rt)) {
3314           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3315           bne(rs, zero_reg, offset32);
3316         } else {
3317           Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3318           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3319           bne(scratch, zero_reg, offset32);
3320         }
3321         break;
3322       case Ugreater_equal:
3323         if (IsZero(rt)) {
3324           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3325           b(offset32);
3326         } else {
3327           Sltu(scratch, rs, rt);
3328           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3329           beq(scratch, zero_reg, offset32);
3330         }
3331         break;
3332       case Uless:
3333         if (IsZero(rt)) {
3334           return true;  // No code needs to be emitted.
3335         } else {
3336           Sltu(scratch, rs, rt);
3337           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3338           bne(scratch, zero_reg, offset32);
3339         }
3340         break;
3341       case Uless_equal:
3342         if (IsZero(rt)) {
3343           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3344           beq(rs, zero_reg, offset32);
3345         } else {
3346           Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3347           offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3348           beq(scratch, zero_reg, offset32);
3349         }
3350         break;
3351       default:
3352         UNREACHABLE();
3353     }
3354   }
3355   // Emit a nop in the branch delay slot if required.
3356   if (bdslot == PROTECT)
3357     nop();
3358 
3359   return true;
3360 }
3361 
3362 
3363 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3364                                       Register rs, const Operand& rt,
3365                                       BranchDelaySlot bdslot) {
3366   BRANCH_ARGS_CHECK(cond, rs, rt);
3367 
3368   if (!L) {
3369     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3370       DCHECK(is_int26(offset));
3371       return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3372     } else {
3373       DCHECK(is_int16(offset));
3374       return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3375     }
3376   } else {
3377     DCHECK(offset == 0);
3378     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3379       return BranchShortHelperR6(0, L, cond, rs, rt);
3380     } else {
3381       return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3382     }
3383   }
3384   return false;
3385 }
3386 
3387 
3388 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3389                                  const Operand& rt, BranchDelaySlot bdslot) {
3390   BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3391 }
3392 
3393 
3394 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3395                                  const Operand& rt, BranchDelaySlot bdslot) {
3396   BranchShortCheck(0, L, cond, rs, rt, bdslot);
3397 }
3398 
3399 
3400 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3401   BranchAndLinkShort(offset, bdslot);
3402 }
3403 
3404 
3405 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3406                                    const Operand& rt, BranchDelaySlot bdslot) {
3407   bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3408   DCHECK(is_near);
3409   USE(is_near);
3410 }
3411 
3412 
3413 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3414   if (L->is_bound()) {
3415     if (is_near_branch(L)) {
3416       BranchAndLinkShort(L, bdslot);
3417     } else {
3418       BranchAndLinkLong(L, bdslot);
3419     }
3420   } else {
3421     if (is_trampoline_emitted()) {
3422       BranchAndLinkLong(L, bdslot);
3423     } else {
3424       BranchAndLinkShort(L, bdslot);
3425     }
3426   }
3427 }
3428 
3429 
3430 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3431                                    const Operand& rt,
3432                                    BranchDelaySlot bdslot) {
3433   if (L->is_bound()) {
3434     if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3435       Label skip;
3436       Condition neg_cond = NegateCondition(cond);
3437       BranchShort(&skip, neg_cond, rs, rt);
3438       BranchAndLinkLong(L, bdslot);
3439       bind(&skip);
3440     }
3441   } else {
3442     if (is_trampoline_emitted()) {
3443       Label skip;
3444       Condition neg_cond = NegateCondition(cond);
3445       BranchShort(&skip, neg_cond, rs, rt);
3446       BranchAndLinkLong(L, bdslot);
3447       bind(&skip);
3448     } else {
3449       BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3450     }
3451   }
3452 }
3453 
3454 
3455 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3456                                               BranchDelaySlot bdslot) {
3457   DCHECK(L == nullptr || offset == 0);
3458   offset = GetOffset(offset, L, OffsetSize::kOffset16);
3459   bal(offset);
3460 
3461   // Emit a nop in the branch delay slot if required.
3462   if (bdslot == PROTECT)
3463     nop();
3464 }
3465 
3466 
3467 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3468   DCHECK(L == nullptr || offset == 0);
3469   offset = GetOffset(offset, L, OffsetSize::kOffset26);
3470   balc(offset);
3471 }
3472 
3473 
3474 void MacroAssembler::BranchAndLinkShort(int32_t offset,
3475                                         BranchDelaySlot bdslot) {
3476   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3477     DCHECK(is_int26(offset));
3478     BranchAndLinkShortHelperR6(offset, nullptr);
3479   } else {
3480     DCHECK(is_int16(offset));
3481     BranchAndLinkShortHelper(offset, nullptr, bdslot);
3482   }
3483 }
3484 
3485 
3486 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3487   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3488     BranchAndLinkShortHelperR6(0, L);
3489   } else {
3490     BranchAndLinkShortHelper(0, L, bdslot);
3491   }
3492 }
3493 
3494 
3495 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3496                                                 Condition cond, Register rs,
3497                                                 const Operand& rt) {
3498   DCHECK(L == nullptr || offset == 0);
3499   Register scratch = rs.is(at) ? t8 : at;
3500   OffsetSize bits = OffsetSize::kOffset16;
3501 
3502   BlockTrampolinePoolScope block_trampoline_pool(this);
3503   DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3504   switch (cond) {
3505     case cc_always:
3506       bits = OffsetSize::kOffset26;
3507       if (!is_near(L, bits)) return false;
3508       offset = GetOffset(offset, L, bits);
3509       balc(offset);
3510       break;
3511     case eq:
3512       if (!is_near(L, bits)) return false;
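      // rs == rt iff (rs - rt) == 0, so branch-and-link on a zero difference.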
3513       Subu(scratch, rs, rt);
3514       offset = GetOffset(offset, L, bits);
3515       beqzalc(scratch, offset);
3516       break;
3517     case ne:
3518       if (!is_near(L, bits)) return false;
3519       Subu(scratch, rs, rt);
3520       offset = GetOffset(offset, L, bits);
3521       bnezalc(scratch, offset);
3522       break;
3523 
3524     // Signed comparison.
3525     case greater:
3526       // rs > rt
3527       if (rs.code() == rt.rm_.reg_code) {
3528         break;  // No code needs to be emitted.
3529       } else if (rs.is(zero_reg)) {
3530         if (!is_near(L, bits)) return false;
3531         scratch = GetRtAsRegisterHelper(rt, scratch);
3532         offset = GetOffset(offset, L, bits);
3533         bltzalc(scratch, offset);
3534       } else if (IsZero(rt)) {
3535         if (!is_near(L, bits)) return false;
3536         offset = GetOffset(offset, L, bits);
3537         bgtzalc(rs, offset);
3538       } else {
3539         if (!is_near(L, bits)) return false;
3540         Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3541         offset = GetOffset(offset, L, bits);
3542         bnezalc(scratch, offset);
3543       }
3544       break;
3545     case greater_equal:
3546       // rs >= rt
3547       if (rs.code() == rt.rm_.reg_code) {
3548         bits = OffsetSize::kOffset26;
3549         if (!is_near(L, bits)) return false;
3550         offset = GetOffset(offset, L, bits);
3551         balc(offset);
3552       } else if (rs.is(zero_reg)) {
3553         if (!is_near(L, bits)) return false;
3554         scratch = GetRtAsRegisterHelper(rt, scratch);
3555         offset = GetOffset(offset, L, bits);
3556         blezalc(scratch, offset);
3557       } else if (IsZero(rt)) {
3558         if (!is_near(L, bits)) return false;
3559         offset = GetOffset(offset, L, bits);
3560         bgezalc(rs, offset);
3561       } else {
3562         if (!is_near(L, bits)) return false;
3563         Slt(scratch, rs, rt);
3564         offset = GetOffset(offset, L, bits);
3565         beqzalc(scratch, offset);
3566       }
3567       break;
3568     case less:
3569       // rs < rt
3570       if (rs.code() == rt.rm_.reg_code) {
3571         break;  // No code needs to be emitted.
3572       } else if (rs.is(zero_reg)) {
3573         if (!is_near(L, bits)) return false;
3574         scratch = GetRtAsRegisterHelper(rt, scratch);
3575         offset = GetOffset(offset, L, bits);
3576         bgtzalc(scratch, offset);
3577       } else if (IsZero(rt)) {
3578         if (!is_near(L, bits)) return false;
3579         offset = GetOffset(offset, L, bits);
3580         bltzalc(rs, offset);
3581       } else {
3582         if (!is_near(L, bits)) return false;
3583         Slt(scratch, rs, rt);
3584         offset = GetOffset(offset, L, bits);
3585         bnezalc(scratch, offset);
3586       }
3587       break;
3588     case less_equal:
3589       // rs <= rt
3590       if (rs.code() == rt.rm_.reg_code) {
3591         bits = OffsetSize::kOffset26;
3592         if (!is_near(L, bits)) return false;
3593         offset = GetOffset(offset, L, bits);
3594         balc(offset);
3595       } else if (rs.is(zero_reg)) {
3596         if (!is_near(L, bits)) return false;
3597         scratch = GetRtAsRegisterHelper(rt, scratch);
3598         offset = GetOffset(offset, L, bits);
3599         bgezalc(scratch, offset);
3600       } else if (IsZero(rt)) {
3601         if (!is_near(L, bits)) return false;
3602         offset = GetOffset(offset, L, bits);
3603         blezalc(rs, offset);
3604       } else {
3605         if (!is_near(L, bits)) return false;
3606         Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3607         offset = GetOffset(offset, L, bits);
3608         beqzalc(scratch, offset);
3609       }
3610       break;
3611 
3612 
3613     // Unsigned comparison.
3614     case Ugreater:
3615       // rs > rt
3616       if (!is_near(L, bits)) return false;
3617       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3618       offset = GetOffset(offset, L, bits);
3619       bnezalc(scratch, offset);
3620       break;
3621     case Ugreater_equal:
3622       // rs >= rt
3623       if (!is_near(L, bits)) return false;
3624       Sltu(scratch, rs, rt);
3625       offset = GetOffset(offset, L, bits);
3626       beqzalc(scratch, offset);
3627       break;
3628     case Uless:
3629       // rs < rt
3630       if (!is_near(L, bits)) return false;
3631       Sltu(scratch, rs, rt);
3632       offset = GetOffset(offset, L, bits);
3633       bnezalc(scratch, offset);
3634       break;
3635     case Uless_equal:
3636       // rs <= rt
3637       if (!is_near(L, bits)) return false;
3638       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3639       offset = GetOffset(offset, L, bits);
3640       beqzalc(scratch, offset);
3641       break;
3642     default:
3643       UNREACHABLE();
3644   }
3645   return true;
3646 }
3647 
3648 
3649 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3650 // with the slt instructions. We could use sub or add instead but we would miss
3651 // overflow cases, so we keep slt and add an intermediate third instruction.
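// For example, the "greater" condition below is emitted as
//   slt    scratch, rt, rs       (scratch = 1 if rs > rt, 0 otherwise)
//   addiu  scratch, scratch, -1  (scratch = 0 when taken, -1 when not)
//   bgezal scratch, offset       (link and branch when scratch >= 0)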
3652 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3653                                               Condition cond, Register rs,
3654                                               const Operand& rt,
3655                                               BranchDelaySlot bdslot) {
3656   DCHECK(L == nullptr || offset == 0);
3657   if (!is_near(L, OffsetSize::kOffset16)) return false;
3658 
3659   Register scratch = t8;
3660   BlockTrampolinePoolScope block_trampoline_pool(this);
3661 
3662   switch (cond) {
3663     case cc_always:
3664       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3665       bal(offset);
3666       break;
3667     case eq:
3668       bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3669       nop();
3670       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3671       bal(offset);
3672       break;
3673     case ne:
3674       beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3675       nop();
3676       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3677       bal(offset);
3678       break;
3679 
3680     // Signed comparison.
3681     case greater:
3682       Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3683       addiu(scratch, scratch, -1);
3684       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3685       bgezal(scratch, offset);
3686       break;
3687     case greater_equal:
3688       Slt(scratch, rs, rt);
3689       addiu(scratch, scratch, -1);
3690       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3691       bltzal(scratch, offset);
3692       break;
3693     case less:
3694       Slt(scratch, rs, rt);
3695       addiu(scratch, scratch, -1);
3696       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3697       bgezal(scratch, offset);
3698       break;
3699     case less_equal:
3700       Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3701       addiu(scratch, scratch, -1);
3702       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3703       bltzal(scratch, offset);
3704       break;
3705 
3706     // Unsigned comparison.
3707     case Ugreater:
3708       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3709       addiu(scratch, scratch, -1);
3710       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3711       bgezal(scratch, offset);
3712       break;
3713     case Ugreater_equal:
3714       Sltu(scratch, rs, rt);
3715       addiu(scratch, scratch, -1);
3716       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3717       bltzal(scratch, offset);
3718       break;
3719     case Uless:
3720       Sltu(scratch, rs, rt);
3721       addiu(scratch, scratch, -1);
3722       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3723       bgezal(scratch, offset);
3724       break;
3725     case Uless_equal:
3726       Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3727       addiu(scratch, scratch, -1);
3728       offset = GetOffset(offset, L, OffsetSize::kOffset16);
3729       bltzal(scratch, offset);
3730       break;
3731 
3732     default:
3733       UNREACHABLE();
3734   }
3735 
3736   // Emit a nop in the branch delay slot if required.
3737   if (bdslot == PROTECT)
3738     nop();
3739 
3740   return true;
3741 }
3742 
3743 
3744 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3745                                              Condition cond, Register rs,
3746                                              const Operand& rt,
3747                                              BranchDelaySlot bdslot) {
3748   BRANCH_ARGS_CHECK(cond, rs, rt);
3749 
3750   if (!L) {
3751     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3752       DCHECK(is_int26(offset));
3753       return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3754     } else {
3755       DCHECK(is_int16(offset));
3756       return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3757     }
3758   } else {
3759     DCHECK(offset == 0);
3760     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3761       return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3762     } else {
3763       return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3764     }
3765   }
3766   return false;
3767 }
3768 
3769 
3770 void MacroAssembler::Jump(Register target,
3771                           Condition cond,
3772                           Register rs,
3773                           const Operand& rt,
3774                           BranchDelaySlot bd) {
3775   BlockTrampolinePoolScope block_trampoline_pool(this);
3776   if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
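    // On r6, jic is a compact jump with no delay slot, so no trailing nop is
    // needed on this path.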
3777     if (cond == cc_always) {
3778       jic(target, 0);
3779     } else {
3780       BRANCH_ARGS_CHECK(cond, rs, rt);
3781       Branch(2, NegateCondition(cond), rs, rt);
3782       jic(target, 0);
3783     }
3784   } else {
3785     if (cond == cc_always) {
3786       jr(target);
3787     } else {
3788       BRANCH_ARGS_CHECK(cond, rs, rt);
3789       Branch(2, NegateCondition(cond), rs, rt);
3790       jr(target);
3791     }
3792     // Emit a nop in the branch delay slot if required.
3793     if (bd == PROTECT) nop();
3794   }
3795 }
3796 
3797 
3798 void MacroAssembler::Jump(intptr_t target,
3799                           RelocInfo::Mode rmode,
3800                           Condition cond,
3801                           Register rs,
3802                           const Operand& rt,
3803                           BranchDelaySlot bd) {
3804   Label skip;
3805   if (cond != cc_always) {
3806     Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3807   }
3808   // The first instruction of 'li' may be placed in the delay slot.
3809   // This is not an issue, t9 is expected to be clobbered anyway.
3810   li(t9, Operand(target, rmode));
3811   Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3812   bind(&skip);
3813 }
3814 
3815 
3816 void MacroAssembler::Jump(Address target,
3817                           RelocInfo::Mode rmode,
3818                           Condition cond,
3819                           Register rs,
3820                           const Operand& rt,
3821                           BranchDelaySlot bd) {
3822   DCHECK(!RelocInfo::IsCodeTarget(rmode));
3823   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3824 }
3825 
3826 
3827 void MacroAssembler::Jump(Handle<Code> code,
3828                           RelocInfo::Mode rmode,
3829                           Condition cond,
3830                           Register rs,
3831                           const Operand& rt,
3832                           BranchDelaySlot bd) {
3833   DCHECK(RelocInfo::IsCodeTarget(rmode));
3834   AllowDeferredHandleDereference embedding_raw_address;
3835   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3836 }
3837 
3838 
3839 int MacroAssembler::CallSize(Register target,
3840                              Condition cond,
3841                              Register rs,
3842                              const Operand& rt,
3843                              BranchDelaySlot bd) {
3844   int size = 0;
3845 
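  // A plain jump is a single instruction; a conditional jump needs a guarding
  // branch and its delay slot in front of it, hence three instructions.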
3846   if (cond == cc_always) {
3847     size += 1;
3848   } else {
3849     size += 3;
3850   }
3851 
3852   if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
3853 
3854   return size * kInstrSize;
3855 }
3856 
3857 
3858 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
3859 void MacroAssembler::Call(Register target,
3860                           Condition cond,
3861                           Register rs,
3862                           const Operand& rt,
3863                           BranchDelaySlot bd) {
3864 #ifdef DEBUG
3865   int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3866 #endif
3867 
3868   BlockTrampolinePoolScope block_trampoline_pool(this);
3869   Label start;
3870   bind(&start);
3871   if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3872     if (cond == cc_always) {
3873       jialc(target, 0);
3874     } else {
3875       BRANCH_ARGS_CHECK(cond, rs, rt);
3876       Branch(2, NegateCondition(cond), rs, rt);
3877       jialc(target, 0);
3878     }
3879   } else {
3880     if (cond == cc_always) {
3881       jalr(target);
3882     } else {
3883       BRANCH_ARGS_CHECK(cond, rs, rt);
3884       Branch(2, NegateCondition(cond), rs, rt);
3885       jalr(target);
3886     }
3887     // Emit a nop in the branch delay slot if required.
3888     if (bd == PROTECT) nop();
3889   }
3890 
3891 #ifdef DEBUG
3892   CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3893            SizeOfCodeGeneratedSince(&start));
3894 #endif
3895 }
3896 
3897 
3898 int MacroAssembler::CallSize(Address target,
3899                              RelocInfo::Mode rmode,
3900                              Condition cond,
3901                              Register rs,
3902                              const Operand& rt,
3903                              BranchDelaySlot bd) {
3904   int size = CallSize(t9, cond, rs, rt, bd);
3905   return size + 2 * kInstrSize;
3906 }
3907 
3908 
3909 void MacroAssembler::Call(Address target,
3910                           RelocInfo::Mode rmode,
3911                           Condition cond,
3912                           Register rs,
3913                           const Operand& rt,
3914                           BranchDelaySlot bd) {
3915   BlockTrampolinePoolScope block_trampoline_pool(this);
3916   Label start;
3917   bind(&start);
3918   int32_t target_int = reinterpret_cast<int32_t>(target);
3919   li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3920   Call(t9, cond, rs, rt, bd);
3921   DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3922             SizeOfCodeGeneratedSince(&start));
3923 }
3924 
3925 
3926 int MacroAssembler::CallSize(Handle<Code> code,
3927                              RelocInfo::Mode rmode,
3928                              TypeFeedbackId ast_id,
3929                              Condition cond,
3930                              Register rs,
3931                              const Operand& rt,
3932                              BranchDelaySlot bd) {
3933   AllowDeferredHandleDereference using_raw_address;
3934   return CallSize(reinterpret_cast<Address>(code.location()),
3935       rmode, cond, rs, rt, bd);
3936 }
3937 
3938 
3939 void MacroAssembler::Call(Handle<Code> code,
3940                           RelocInfo::Mode rmode,
3941                           TypeFeedbackId ast_id,
3942                           Condition cond,
3943                           Register rs,
3944                           const Operand& rt,
3945                           BranchDelaySlot bd) {
3946   BlockTrampolinePoolScope block_trampoline_pool(this);
3947   Label start;
3948   bind(&start);
3949   DCHECK(RelocInfo::IsCodeTarget(rmode));
3950   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3951     SetRecordedAstId(ast_id);
3952     rmode = RelocInfo::CODE_TARGET_WITH_ID;
3953   }
3954   AllowDeferredHandleDereference embedding_raw_address;
3955   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3956   DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3957             SizeOfCodeGeneratedSince(&start));
3958 }
3959 
3960 
3961 void MacroAssembler::Ret(Condition cond,
3962                          Register rs,
3963                          const Operand& rt,
3964                          BranchDelaySlot bd) {
3965   Jump(ra, cond, rs, rt, bd);
3966 }
3967 
3968 
3969 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3970   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3971       (!L->is_bound() || is_near_r6(L))) {
3972     BranchShortHelperR6(0, L);
3973   } else {
3974     BlockTrampolinePoolScope block_trampoline_pool(this);
3975     uint32_t imm32;
3976     imm32 = jump_address(L);
3977     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
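      // With a protected (unused) delay slot, materialize the full 32-bit
      // target with a lui/jic pair instead of lui/ori followed by jr.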
3978       uint32_t lui_offset, jic_offset;
3979       UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
3980       {
3981         BlockGrowBufferScope block_buf_growth(this);
3982         // Buffer growth (and relocation) must be blocked for internal
3983         // references until associated instructions are emitted and
3984         // available to be patched.
3985         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3986         lui(at, lui_offset);
3987         jic(at, jic_offset);
3988       }
3989       CheckBuffer();
3990     } else {
3991       {
3992         BlockGrowBufferScope block_buf_growth(this);
3993         // Buffer growth (and relocation) must be blocked for internal
3994         // references
3995         // until associated instructions are emitted and available to be
3996         // patched.
3997         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3998         lui(at, (imm32 & kHiMask) >> kLuiShift);
3999         ori(at, at, (imm32 & kImm16Mask));
4000       }
4001       CheckBuffer();
4002       jr(at);
4003       // Emit a nop in the branch delay slot if required.
4004       if (bdslot == PROTECT) nop();
4005     }
4006   }
4007 }
4008 
4009 
4010 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4011   if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
4012       (!L->is_bound() || is_near_r6(L))) {
4013     BranchAndLinkShortHelperR6(0, L);
4014   } else {
4015     BlockTrampolinePoolScope block_trampoline_pool(this);
4016     uint32_t imm32;
4017     imm32 = jump_address(L);
4018     if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
4019       uint32_t lui_offset, jic_offset;
4020       UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
4021       {
4022         BlockGrowBufferScope block_buf_growth(this);
4023         // Buffer growth (and relocation) must be blocked for internal
4024         // references until associated instructions are emitted and
4025         // available to be patched.
4026         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4027         lui(at, lui_offset);
4028         jialc(at, jic_offset);
4029       }
4030       CheckBuffer();
4031     } else {
4032       {
4033         BlockGrowBufferScope block_buf_growth(this);
4034         // Buffer growth (and relocation) must be blocked for internal
4035         // references
4036         // until associated instructions are emitted and available to be
4037         // patched.
4038         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4039         lui(at, (imm32 & kHiMask) >> kLuiShift);
4040         ori(at, at, (imm32 & kImm16Mask));
4041       }
4042       CheckBuffer();
4043       jalr(at);
4044       // Emit a nop in the branch delay slot if required.
4045       if (bdslot == PROTECT) nop();
4046     }
4047   }
4048 }
4049 
4050 
4051 void MacroAssembler::DropAndRet(int drop) {
4052   DCHECK(is_int16(drop * kPointerSize));
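  // The stack adjustment below executes in the delay slot of the return, so
  // dropping the arguments and returning take just two instructions.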
4053   Ret(USE_DELAY_SLOT);
4054   addiu(sp, sp, drop * kPointerSize);
4055 }
4056 
4057 void MacroAssembler::DropAndRet(int drop,
4058                                 Condition cond,
4059                                 Register r1,
4060                                 const Operand& r2) {
4061   // Both Drop and Ret need to be conditional.
4062   Label skip;
4063   if (cond != cc_always) {
4064     Branch(&skip, NegateCondition(cond), r1, r2);
4065   }
4066 
4067   Drop(drop);
4068   Ret();
4069 
4070   if (cond != cc_always) {
4071     bind(&skip);
4072   }
4073 }
4074 
4075 
4076 void MacroAssembler::Drop(int count,
4077                           Condition cond,
4078                           Register reg,
4079                           const Operand& op) {
4080   if (count <= 0) {
4081     return;
4082   }
4083 
4084   Label skip;
4085 
4086   if (cond != al) {
4087      Branch(&skip, NegateCondition(cond), reg, op);
4088   }
4089 
4090   Addu(sp, sp, Operand(count * kPointerSize));
4091 
4092   if (cond != al) {
4093     bind(&skip);
4094   }
4095 }
4096 
4097 
4098 
4099 void MacroAssembler::Swap(Register reg1,
4100                           Register reg2,
4101                           Register scratch) {
4102   if (scratch.is(no_reg)) {
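    // No scratch register was provided; swap the two registers in place with
    // the classic three-XOR trick.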
4103     Xor(reg1, reg1, Operand(reg2));
4104     Xor(reg2, reg2, Operand(reg1));
4105     Xor(reg1, reg1, Operand(reg2));
4106   } else {
4107     mov(scratch, reg1);
4108     mov(reg1, reg2);
4109     mov(reg2, scratch);
4110   }
4111 }
4112 
4113 
4114 void MacroAssembler::Call(Label* target) {
4115   BranchAndLink(target);
4116 }
4117 
4118 
4119 void MacroAssembler::Push(Handle<Object> handle) {
4120   li(at, Operand(handle));
4121   push(at);
4122 }
4123 
4124 
4125 void MacroAssembler::DebugBreak() {
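  // Drop into the runtime's debugger-statement handler through the CEntry
  // stub; the call takes no arguments.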
4126   PrepareCEntryArgs(0);
4127   PrepareCEntryFunction(
4128       ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
4129   CEntryStub ces(isolate(), 1);
4130   DCHECK(AllowThisStubCall(&ces));
4131   Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
4132 }
4133 
4134 
4135 // ---------------------------------------------------------------------------
4136 // Exception handling.
4137 
4138 void MacroAssembler::PushStackHandler() {
4139   // Adjust this code if not the case.
4140   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
4141   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4142 
4143   // Link the current handler as the next handler.
4144   li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4145   lw(t1, MemOperand(t2));
4146   push(t1);
4147 
4148   // Set this new handler as the current one.
4149   sw(sp, MemOperand(t2));
4150 }
4151 
4152 
4153 void MacroAssembler::PopStackHandler() {
4154   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4155   pop(a1);
4156   Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
4157   li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4158   sw(a1, MemOperand(at));
4159 }
4160 
4161 
4162 void MacroAssembler::Allocate(int object_size,
4163                               Register result,
4164                               Register scratch1,
4165                               Register scratch2,
4166                               Label* gc_required,
4167                               AllocationFlags flags) {
4168   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4169   DCHECK((flags & ALLOCATION_FOLDED) == 0);
4170   if (!FLAG_inline_new) {
4171     if (emit_debug_code()) {
4172       // Trash the registers to simulate an allocation failure.
4173       li(result, 0x7091);
4174       li(scratch1, 0x7191);
4175       li(scratch2, 0x7291);
4176     }
4177     jmp(gc_required);
4178     return;
4179   }
4180 
4181   DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4182 
4183   // Make object size into bytes.
4184   if ((flags & SIZE_IN_WORDS) != 0) {
4185     object_size *= kPointerSize;
4186   }
4187   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
4188 
4189   // Check relative positions of allocation top and limit addresses.
4190   // ARM adds additional checks to make sure the ldm instruction can be
4191   // used. On MIPS we don't have ldm so we don't need additional checks either.
4192   ExternalReference allocation_top =
4193       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4194   ExternalReference allocation_limit =
4195       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4196 
4197   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4198   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4199   DCHECK((limit - top) == kPointerSize);
4200 
4201   // Set up allocation top address and allocation limit registers.
4202   Register top_address = scratch1;
4203   // This code stores a temporary value in t9.
4204   Register alloc_limit = t9;
4205   Register result_end = scratch2;
4206   li(top_address, Operand(allocation_top));
4207 
4208   if ((flags & RESULT_CONTAINS_TOP) == 0) {
4209     // Load allocation top into result and allocation limit into alloc_limit.
4210     lw(result, MemOperand(top_address));
4211     lw(alloc_limit, MemOperand(top_address, kPointerSize));
4212   } else {
4213     if (emit_debug_code()) {
4214       // Assert that result actually contains top on entry.
4215       lw(alloc_limit, MemOperand(top_address));
4216       Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4217     }
4218     // Load allocation limit. Result already contains allocation top.
4219     lw(alloc_limit, MemOperand(top_address, limit - top));
4220   }
4221 
4222   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4223     // Align the next allocation. Storing the filler map without checking top is
4224     // safe in new-space because the limit of the heap is aligned there.
4225     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4226     And(result_end, result, Operand(kDoubleAlignmentMask));
4227     Label aligned;
4228     Branch(&aligned, eq, result_end, Operand(zero_reg));
4229     if ((flags & PRETENURE) != 0) {
4230       Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
4231     }
4232     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4233     sw(result_end, MemOperand(result));
4234     Addu(result, result, Operand(kDoubleSize / 2));
4235     bind(&aligned);
4236   }
4237 
4238   // Calculate new top and bail out if new space is exhausted. Use result
4239   // to calculate the new top.
4240   Addu(result_end, result, Operand(object_size));
4241   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4242 
4243   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4244     // The top pointer is not updated for allocation folding dominators.
4245     sw(result_end, MemOperand(top_address));
4246   }
4247 
4248   // Tag object.
4249   Addu(result, result, Operand(kHeapObjectTag));
4250 }
4251 
4252 
4253 void MacroAssembler::Allocate(Register object_size, Register result,
4254                               Register result_end, Register scratch,
4255                               Label* gc_required, AllocationFlags flags) {
4256   DCHECK((flags & ALLOCATION_FOLDED) == 0);
4257   if (!FLAG_inline_new) {
4258     if (emit_debug_code()) {
4259       // Trash the registers to simulate an allocation failure.
4260       li(result, 0x7091);
4261       li(scratch, 0x7191);
4262       li(result_end, 0x7291);
4263     }
4264     jmp(gc_required);
4265     return;
4266   }
4267 
4268   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4269   // is not specified. Other registers must not overlap.
4270   DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4271   DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4272   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
4273 
4274   // Check relative positions of allocation top and limit addresses.
4275   // ARM adds additional checks to make sure the ldm instruction can be
4276   // used. On MIPS we don't have ldm so we don't need additional checks either.
4277   ExternalReference allocation_top =
4278       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4279   ExternalReference allocation_limit =
4280       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4281   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4282   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4283   DCHECK((limit - top) == kPointerSize);
4284 
4285   // Set up allocation top address and allocation limit registers.
4286   Register top_address = scratch;
4287   // This code stores a temporary value in t9.
4288   Register alloc_limit = t9;
4289   li(top_address, Operand(allocation_top));
4290 
4291   if ((flags & RESULT_CONTAINS_TOP) == 0) {
4292     // Load allocation top into result and allocation limit into alloc_limit.
4293     lw(result, MemOperand(top_address));
4294     lw(alloc_limit, MemOperand(top_address, kPointerSize));
4295   } else {
4296     if (emit_debug_code()) {
4297       // Assert that result actually contains top on entry.
4298       lw(alloc_limit, MemOperand(top_address));
4299       Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4300     }
4301     // Load allocation limit. Result already contains allocation top.
4302     lw(alloc_limit, MemOperand(top_address, limit - top));
4303   }
4304 
4305   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4306     // Align the next allocation. Storing the filler map without checking top is
4307     // safe in new-space because the limit of the heap is aligned there.
4308     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4309     And(result_end, result, Operand(kDoubleAlignmentMask));
4310     Label aligned;
4311     Branch(&aligned, eq, result_end, Operand(zero_reg));
4312     if ((flags & PRETENURE) != 0) {
4313       Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
4314     }
4315     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4316     sw(result_end, MemOperand(result));
4317     Addu(result, result, Operand(kDoubleSize / 2));
4318     bind(&aligned);
4319   }
4320 
4321   // Calculate new top and bail out if new space is exhausted. Use result
4322   // to calculate the new top. Object size may be in words so a shift is
4323   // required to get the number of bytes.
4324   if ((flags & SIZE_IN_WORDS) != 0) {
4325     Lsa(result_end, result, object_size, kPointerSizeLog2);
4326   } else {
4327     Addu(result_end, result, Operand(object_size));
4328   }
4329 
4330   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4331 
4332   // Update allocation top. result temporarily holds the new top.
4333   if (emit_debug_code()) {
4334     And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
4335     Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
4336   }
4337 
4338   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4339     // The top pointer is not updated for allocation folding dominators.
4340     sw(result_end, MemOperand(top_address));
4341   }
4342 
4343   // Tag object.
4344   Addu(result, result, Operand(kHeapObjectTag));
4345 }
4346 
4347 void MacroAssembler::FastAllocate(int object_size, Register result,
4348                                   Register scratch1, Register scratch2,
4349                                   AllocationFlags flags) {
4350   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4351   DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4352 
4353   // Make object size into bytes.
4354   if ((flags & SIZE_IN_WORDS) != 0) {
4355     object_size *= kPointerSize;
4356   }
4357   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
4358 
4359   ExternalReference allocation_top =
4360       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4361 
4362   // Set up allocation top address and allocation limit registers.
4363   Register top_address = scratch1;
4364   // This code stores a temporary value in t9.
4365   Register result_end = scratch2;
4366   li(top_address, Operand(allocation_top));
4367   lw(result, MemOperand(top_address));
4368 
4369   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4370     // Align the next allocation. Storing the filler map without checking top is
4371     // safe in new-space because the limit of the heap is aligned there.
4372     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4373     And(result_end, result, Operand(kDoubleAlignmentMask));
4374     Label aligned;
4375     Branch(&aligned, eq, result_end, Operand(zero_reg));
4376     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4377     sw(result_end, MemOperand(result));
4378     Addu(result, result, Operand(kDoubleSize / 2));
4379     bind(&aligned);
4380   }
4381 
4382   Addu(result_end, result, Operand(object_size));
4383 
4384   // The top pointer is not updated for allocation folding dominators.
4385   sw(result_end, MemOperand(top_address));
4386 
4387   Addu(result, result, Operand(kHeapObjectTag));
4388 }
4389 
4390 void MacroAssembler::FastAllocate(Register object_size, Register result,
4391                                   Register result_end, Register scratch,
4392                                   AllocationFlags flags) {
4393   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4394   // is not specified. Other registers must not overlap.
4395   DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4396   DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4397   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
4398 
4399   ExternalReference allocation_top =
4400       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4401 
4402   // Set up allocation top address and allocation limit registers.
4403   Register top_address = scratch;
4404   // This code stores a temporary value in t9.
4405   li(top_address, Operand(allocation_top));
4406   lw(result, MemOperand(top_address));
4407 
4408   if ((flags & DOUBLE_ALIGNMENT) != 0) {
4409     // Align the next allocation. Storing the filler map without checking top is
4410     // safe in new-space because the limit of the heap is aligned there.
4411     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4412     And(result_end, result, Operand(kDoubleAlignmentMask));
4413     Label aligned;
4414     Branch(&aligned, eq, result_end, Operand(zero_reg));
4415     li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
4416     sw(result_end, MemOperand(result));
4417     Addu(result, result, Operand(kDoubleSize / 2));
4418     bind(&aligned);
4419   }
4420 
4421   // Calculate new top and bail out if new space is exhausted. Use result
4422   // to calculate the new top. Object size may be in words so a shift is
4423   // required to get the number of bytes.
4424   if ((flags & SIZE_IN_WORDS) != 0) {
4425     Lsa(result_end, result, object_size, kPointerSizeLog2);
4426   } else {
4427     Addu(result_end, result, Operand(object_size));
4428   }
4429 
4430   // The top pointer is not updated for allocation folding dominators.
4431   sw(result_end, MemOperand(top_address));
4432 
4433   Addu(result, result, Operand(kHeapObjectTag));
4434 }
4435 
4436 void MacroAssembler::AllocateTwoByteString(Register result,
4437                                            Register length,
4438                                            Register scratch1,
4439                                            Register scratch2,
4440                                            Register scratch3,
4441                                            Label* gc_required) {
4442   // Calculate the number of bytes needed for the characters in the string while
4443   // observing object alignment.
4444   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4445   sll(scratch1, length, 1);  // Length in bytes, not chars.
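  // Add the header size and round the total up to the object alignment to get
  // the allocation size in bytes.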
4446   addiu(scratch1, scratch1,
4447        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4448   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4449 
4450   // Allocate two-byte string in new space.
4451   Allocate(scratch1, result, scratch2, scratch3, gc_required,
4452            NO_ALLOCATION_FLAGS);
4453 
4454   // Set the map, length and hash field.
4455   InitializeNewString(result,
4456                       length,
4457                       Heap::kStringMapRootIndex,
4458                       scratch1,
4459                       scratch2);
4460 }
4461 
4462 
4463 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4464                                            Register scratch1, Register scratch2,
4465                                            Register scratch3,
4466                                            Label* gc_required) {
4467   // Calculate the number of bytes needed for the characters in the string
4468   // while observing object alignment.
4469   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4470   DCHECK(kCharSize == 1);
4471   addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4472   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4473 
4474   // Allocate one-byte string in new space.
4475   Allocate(scratch1, result, scratch2, scratch3, gc_required,
4476            NO_ALLOCATION_FLAGS);
4477 
4478   // Set the map, length and hash field.
4479   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4480                       scratch1, scratch2);
4481 }
4482 
4483 
4484 void MacroAssembler::AllocateTwoByteConsString(Register result,
4485                                                Register length,
4486                                                Register scratch1,
4487                                                Register scratch2,
4488                                                Label* gc_required) {
4489   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4490            NO_ALLOCATION_FLAGS);
4491   InitializeNewString(result,
4492                       length,
4493                       Heap::kConsStringMapRootIndex,
4494                       scratch1,
4495                       scratch2);
4496 }
4497 
4498 
4499 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4500                                                Register scratch1,
4501                                                Register scratch2,
4502                                                Label* gc_required) {
4503   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4504            NO_ALLOCATION_FLAGS);
4505 
4506   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4507                       scratch1, scratch2);
4508 }
4509 
4510 
4511 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4512                                                  Register length,
4513                                                  Register scratch1,
4514                                                  Register scratch2,
4515                                                  Label* gc_required) {
4516   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4517            NO_ALLOCATION_FLAGS);
4518 
4519   InitializeNewString(result,
4520                       length,
4521                       Heap::kSlicedStringMapRootIndex,
4522                       scratch1,
4523                       scratch2);
4524 }
4525 
4526 
4527 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4528                                                  Register length,
4529                                                  Register scratch1,
4530                                                  Register scratch2,
4531                                                  Label* gc_required) {
4532   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4533            NO_ALLOCATION_FLAGS);
4534 
4535   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4536                       scratch1, scratch2);
4537 }
4538 
4539 
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name)4540 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4541                                                      Label* not_unique_name) {
4542   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
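  // Internalized strings have both the kIsNotStringMask and
  // kIsNotInternalizedMask bits clear (both tags are zero), so they succeed
  // directly; every other unique name must be a Symbol.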
4543   Label succeed;
4544   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4545   Branch(&succeed, eq, at, Operand(zero_reg));
4546   Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4547 
4548   bind(&succeed);
4549 }
4550 
4551 
4552 // Allocates a heap number or jumps to the label if the young space is full and
4553 // a scavenge is needed.
AllocateHeapNumber(Register result,Register scratch1,Register scratch2,Register heap_number_map,Label * need_gc,MutableMode mode)4554 void MacroAssembler::AllocateHeapNumber(Register result,
4555                                         Register scratch1,
4556                                         Register scratch2,
4557                                         Register heap_number_map,
4558                                         Label* need_gc,
4559                                         MutableMode mode) {
4560   // Allocate an object in the heap for the heap number and tag it as a heap
4561   // object.
4562   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4563            NO_ALLOCATION_FLAGS);
4564 
4565   Heap::RootListIndex map_index = mode == MUTABLE
4566       ? Heap::kMutableHeapNumberMapRootIndex
4567       : Heap::kHeapNumberMapRootIndex;
4568   AssertIsRoot(heap_number_map, map_index);
4569 
4570   // Store heap number map in the allocated object.
4571   sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4572 }
4573 
4574 
AllocateHeapNumberWithValue(Register result,FPURegister value,Register scratch1,Register scratch2,Label * gc_required)4575 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4576                                                  FPURegister value,
4577                                                  Register scratch1,
4578                                                  Register scratch2,
4579                                                  Label* gc_required) {
4580   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4581   AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4582   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4583 }
4584 
4585 
AllocateJSValue(Register result,Register constructor,Register value,Register scratch1,Register scratch2,Label * gc_required)4586 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4587                                      Register value, Register scratch1,
4588                                      Register scratch2, Label* gc_required) {
4589   DCHECK(!result.is(constructor));
4590   DCHECK(!result.is(scratch1));
4591   DCHECK(!result.is(scratch2));
4592   DCHECK(!result.is(value));
4593 
4594   // Allocate JSValue in new space.
4595   Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4596            NO_ALLOCATION_FLAGS);
4597 
4598   // Initialize the JSValue.
4599   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4600   sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4601   LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4602   sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4603   sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4604   sw(value, FieldMemOperand(result, JSValue::kValueOffset));
4605   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4606 }
4607 
4608 
CopyBytes(Register src,Register dst,Register length,Register scratch)4609 void MacroAssembler::CopyBytes(Register src,
4610                                Register dst,
4611                                Register length,
4612                                Register scratch) {
4613   Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
4614 
4615   // Align src before copying in word size chunks.
4616   Branch(&byte_loop, le, length, Operand(kPointerSize));
4617   bind(&align_loop_1);
4618   And(scratch, src, kPointerSize - 1);
4619   Branch(&word_loop, eq, scratch, Operand(zero_reg));
4620   lbu(scratch, MemOperand(src));
4621   Addu(src, src, 1);
4622   sb(scratch, MemOperand(dst));
4623   Addu(dst, dst, 1);
4624   Subu(length, length, Operand(1));
4625   Branch(&align_loop_1, ne, length, Operand(zero_reg));
4626 
4627   // Copy bytes in word size chunks.
4628   bind(&word_loop);
4629   if (emit_debug_code()) {
4630     And(scratch, src, kPointerSize - 1);
4631     Assert(eq, kExpectingAlignmentForCopyBytes,
4632         scratch, Operand(zero_reg));
4633   }
4634   Branch(&byte_loop, lt, length, Operand(kPointerSize));
4635   lw(scratch, MemOperand(src));
4636   Addu(src, src, kPointerSize);
4637 
4638   // TODO(kalmard) check if this can be optimized to use sw in most cases.
4639   // Can't use unaligned access - copy byte by byte.
4640   if (kArchEndian == kLittle) {
4641     sb(scratch, MemOperand(dst, 0));
4642     srl(scratch, scratch, 8);
4643     sb(scratch, MemOperand(dst, 1));
4644     srl(scratch, scratch, 8);
4645     sb(scratch, MemOperand(dst, 2));
4646     srl(scratch, scratch, 8);
4647     sb(scratch, MemOperand(dst, 3));
4648   } else {
4649     sb(scratch, MemOperand(dst, 3));
4650     srl(scratch, scratch, 8);
4651     sb(scratch, MemOperand(dst, 2));
4652     srl(scratch, scratch, 8);
4653     sb(scratch, MemOperand(dst, 1));
4654     srl(scratch, scratch, 8);
4655     sb(scratch, MemOperand(dst, 0));
4656   }
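  // The word is stored one byte at a time because dst may not be word aligned;
  // each srl exposes the next byte in ascending memory order for the current
  // endianness.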
4657 
4658   Addu(dst, dst, 4);
4659 
4660   Subu(length, length, Operand(kPointerSize));
4661   Branch(&word_loop);
4662 
4663   // Copy the last bytes, if any are left.
4664   bind(&byte_loop);
4665   Branch(&done, eq, length, Operand(zero_reg));
4666   bind(&byte_loop_1);
4667   lbu(scratch, MemOperand(src));
4668   Addu(src, src, 1);
4669   sb(scratch, MemOperand(dst));
4670   Addu(dst, dst, 1);
4671   Subu(length, length, Operand(1));
4672   Branch(&byte_loop_1, ne, length, Operand(zero_reg));
4673   bind(&done);
4674 }
4675 
4676 
InitializeFieldsWithFiller(Register current_address,Register end_address,Register filler)4677 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4678                                                 Register end_address,
4679                                                 Register filler) {
4680   Label loop, entry;
4681   Branch(&entry);
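  // Jump straight to the bottom-tested condition so that an empty range
  // (current_address >= end_address) writes nothing.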
4682   bind(&loop);
4683   sw(filler, MemOperand(current_address));
4684   Addu(current_address, current_address, kPointerSize);
4685   bind(&entry);
4686   Branch(&loop, ult, current_address, Operand(end_address));
4687 }
4688 
4689 
CheckFastElements(Register map,Register scratch,Label * fail)4690 void MacroAssembler::CheckFastElements(Register map,
4691                                        Register scratch,
4692                                        Label* fail) {
4693   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4694   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4695   STATIC_ASSERT(FAST_ELEMENTS == 2);
4696   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4697   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4698   Branch(fail, hi, scratch,
4699          Operand(Map::kMaximumBitField2FastHoleyElementValue));
4700 }
4701 
4702 
CheckFastObjectElements(Register map,Register scratch,Label * fail)4703 void MacroAssembler::CheckFastObjectElements(Register map,
4704                                              Register scratch,
4705                                              Label* fail) {
4706   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4707   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4708   STATIC_ASSERT(FAST_ELEMENTS == 2);
4709   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4710   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4711   Branch(fail, ls, scratch,
4712          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4713   Branch(fail, hi, scratch,
4714          Operand(Map::kMaximumBitField2FastHoleyElementValue));
4715 }
4716 
4717 
CheckFastSmiElements(Register map,Register scratch,Label * fail)4718 void MacroAssembler::CheckFastSmiElements(Register map,
4719                                           Register scratch,
4720                                           Label* fail) {
4721   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4722   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4723   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4724   Branch(fail, hi, scratch,
4725          Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4726 }
4727 
4728 
StoreNumberToDoubleElements(Register value_reg,Register key_reg,Register elements_reg,Register scratch1,Register scratch2,Register scratch3,Label * fail,int elements_offset)4729 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4730                                                  Register key_reg,
4731                                                  Register elements_reg,
4732                                                  Register scratch1,
4733                                                  Register scratch2,
4734                                                  Register scratch3,
4735                                                  Label* fail,
4736                                                  int elements_offset) {
4737   DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
4738                      scratch3));
4739   Label smi_value, done;
4740 
4741   // Handle smi values specially.
4742   JumpIfSmi(value_reg, &smi_value);
4743 
4744   // Ensure that the object is a heap number.
4745   CheckMap(value_reg,
4746            scratch1,
4747            Heap::kHeapNumberMapRootIndex,
4748            fail,
4749            DONT_DO_SMI_CHECK);
4750 
4751   // Double value: turn a potential sNaN into a qNaN.
4752   DoubleRegister double_result = f0;
4753   DoubleRegister double_scratch = f2;
4754 
4755   ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4756   Branch(USE_DELAY_SLOT, &done);  // Canonicalization is one instruction.
4757   FPUCanonicalizeNaN(double_result, double_result);
4758 
4759   bind(&smi_value);
4760   Register untagged_value = scratch2;
4761   SmiUntag(untagged_value, value_reg);
4762   mtc1(untagged_value, double_scratch);
4763   cvt_d_w(double_result, double_scratch);
4764 
4765   bind(&done);
4766   Addu(scratch1, elements_reg,
4767       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4768               elements_offset));
4769   Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
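  // key_reg holds a smi (the index shifted left by kSmiTagSize), so scaling it
  // by kDoubleSizeLog2 - kSmiTagSize multiplies the untagged index by
  // kDoubleSize: e.g. index 3 is the smi 6, and 6 << 2 == 24 == 3 * 8 bytes.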
4770   // scratch1 is now the effective address of the double element.
4771   sdc1(double_result, MemOperand(scratch1, 0));
4772 }
4773 
SubNanPreservePayloadAndSign_s(FloatRegister fd,FloatRegister fs,FloatRegister ft)4774 void MacroAssembler::SubNanPreservePayloadAndSign_s(FloatRegister fd,
4775                                                     FloatRegister fs,
4776                                                     FloatRegister ft) {
4777   FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4778   Label check_nan, save_payload, done;
4779   Register scratch1 = t8;
4780   Register scratch2 = t9;
4781 
4782   sub_s(dest, fs, ft);
4783   // Check if the result of subtraction is NaN.
4784   BranchF32(nullptr, &check_nan, eq, fs, ft);
4785   Branch(USE_DELAY_SLOT, &done);
4786   dest.is(fd) ? nop() : mov_s(fd, dest);
4787 
4788   bind(&check_nan);
4789   // Check if first operand is a NaN.
4790   mfc1(scratch1, fs);
4791   BranchF32(nullptr, &save_payload, eq, fs, fs);
4792   // Second operand must be a NaN.
4793   mfc1(scratch1, ft);
4794 
4795   bind(&save_payload);
4796   // Preserve the payload.
4797   And(scratch1, scratch1,
4798       Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
4799   mfc1(scratch2, dest);
4800   And(scratch2, scratch2, Operand(kSingleNaNMask));
4801   Or(scratch2, scratch2, scratch1);
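  // scratch2 now holds a quiet NaN: the all-ones exponent and quiet bit come
  // from the subtraction result, while the sign and payload bits (scratch1)
  // come from the NaN operand, so the original sign and payload survive.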
4802   mtc1(scratch2, fd);
4803 
4804   bind(&done);
4805 }
4806 
SubNanPreservePayloadAndSign_d(DoubleRegister fd,DoubleRegister fs,DoubleRegister ft)4807 void MacroAssembler::SubNanPreservePayloadAndSign_d(DoubleRegister fd,
4808                                                     DoubleRegister fs,
4809                                                     DoubleRegister ft) {
4810   FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4811   Label check_nan, save_payload, done;
4812   Register scratch1 = t8;
4813   Register scratch2 = t9;
4814 
4815   sub_d(dest, fs, ft);
4816   // Check if the result of subtraction is NaN.
4817   BranchF64(nullptr, &check_nan, eq, fs, ft);
4818   Branch(USE_DELAY_SLOT, &done);
4819   dest.is(fd) ? nop() : mov_d(fd, dest);
4820 
4821   bind(&check_nan);
4822   // Check if first operand is a NaN.
4823   Mfhc1(scratch1, fs);
4824   mov_s(dest, fs);
4825   BranchF64(nullptr, &save_payload, eq, fs, fs);
4826   // Second operand must be a NaN.
4827   Mfhc1(scratch1, ft);
4828   mov_s(dest, ft);
4829 
4830   bind(&save_payload);
4831   // Preserve the payload.
4832   And(scratch1, scratch1,
4833       Operand(kDoubleSignMask | ((1 << kDoubleNaNShift) - 1)));
4834   Mfhc1(scratch2, dest);
4835   And(scratch2, scratch2, Operand(kDoubleNaNMask));
4836   Or(scratch2, scratch2, scratch1);
4837   Move_s(fd, dest);
4838   Mthc1(scratch2, fd);
4839 
4840   bind(&done);
4841 }
4842 
CompareMapAndBranch(Register obj,Register scratch,Handle<Map> map,Label * early_success,Condition cond,Label * branch_to)4843 void MacroAssembler::CompareMapAndBranch(Register obj,
4844                                          Register scratch,
4845                                          Handle<Map> map,
4846                                          Label* early_success,
4847                                          Condition cond,
4848                                          Label* branch_to) {
4849   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4850   CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4851 }
4852 
4853 
CompareMapAndBranch(Register obj_map,Handle<Map> map,Label * early_success,Condition cond,Label * branch_to)4854 void MacroAssembler::CompareMapAndBranch(Register obj_map,
4855                                          Handle<Map> map,
4856                                          Label* early_success,
4857                                          Condition cond,
4858                                          Label* branch_to) {
4859   Branch(branch_to, cond, obj_map, Operand(map));
4860 }
4861 
4862 
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)4863 void MacroAssembler::CheckMap(Register obj,
4864                               Register scratch,
4865                               Handle<Map> map,
4866                               Label* fail,
4867                               SmiCheckType smi_check_type) {
4868   if (smi_check_type == DO_SMI_CHECK) {
4869     JumpIfSmi(obj, fail);
4870   }
4871   Label success;
4872   CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4873   bind(&success);
4874 }
4875 
4876 
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)4877 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4878                                      Register scratch2, Handle<WeakCell> cell,
4879                                      Handle<Code> success,
4880                                      SmiCheckType smi_check_type) {
4881   Label fail;
4882   if (smi_check_type == DO_SMI_CHECK) {
4883     JumpIfSmi(obj, &fail);
4884   }
4885   lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4886   GetWeakValue(scratch2, cell);
4887   Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4888   bind(&fail);
4889 }
4890 
4891 
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)4892 void MacroAssembler::CheckMap(Register obj,
4893                               Register scratch,
4894                               Heap::RootListIndex index,
4895                               Label* fail,
4896                               SmiCheckType smi_check_type) {
4897   if (smi_check_type == DO_SMI_CHECK) {
4898     JumpIfSmi(obj, fail);
4899   }
4900   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4901   LoadRoot(at, index);
4902   Branch(fail, ne, scratch, Operand(at));
4903 }
4904 
FPUCanonicalizeNaN(const DoubleRegister dst,const DoubleRegister src)4905 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4906                                         const DoubleRegister src) {
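  // x - 0.0 leaves every non-NaN value, including -0.0, unchanged under the
  // default rounding mode, while a signaling NaN operand produces a quiet NaN,
  // so a single subtraction canonicalizes NaNs.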
4907   sub_d(dst, src, kDoubleRegZero);
4908 }
4909 
GetWeakValue(Register value,Handle<WeakCell> cell)4910 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4911   li(value, Operand(cell));
4912   lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
4913 }
4914 
4915 
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)4916 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4917                                    Label* miss) {
4918   GetWeakValue(value, cell);
4919   JumpIfSmi(value, miss);
4920 }
4921 
4922 
MovFromFloatResult(DoubleRegister dst)4923 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
4924   if (IsMipsSoftFloatABI) {
4925     if (kArchEndian == kLittle) {
4926       Move(dst, v0, v1);
4927     } else {
4928       Move(dst, v1, v0);
4929     }
4930   } else {
4931     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
4932   }
4933 }
4934 
4935 
MovFromFloatParameter(DoubleRegister dst)4936 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
4937   if (IsMipsSoftFloatABI) {
4938     if (kArchEndian == kLittle) {
4939       Move(dst, a0, a1);
4940     } else {
4941       Move(dst, a1, a0);
4942     }
4943   } else {
4944     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
4945   }
4946 }
4947 
4948 
MovToFloatParameter(DoubleRegister src)4949 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4950   if (!IsMipsSoftFloatABI) {
4951     Move(f12, src);
4952   } else {
4953     if (kArchEndian == kLittle) {
4954       Move(a0, a1, src);
4955     } else {
4956       Move(a1, a0, src);
4957     }
4958   }
4959 }
4960 
4961 
MovToFloatResult(DoubleRegister src)4962 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4963   if (!IsMipsSoftFloatABI) {
4964     Move(f0, src);
4965   } else {
4966     if (kArchEndian == kLittle) {
4967       Move(v0, v1, src);
4968     } else {
4969       Move(v1, v0, src);
4970     }
4971   }
4972 }
4973 
4974 
MovToFloatParameters(DoubleRegister src1,DoubleRegister src2)4975 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4976                                           DoubleRegister src2) {
4977   if (!IsMipsSoftFloatABI) {
4978     if (src2.is(f12)) {
4979       DCHECK(!src1.is(f14));
4980       Move(f14, src2);
4981       Move(f12, src1);
4982     } else {
4983       Move(f12, src1);
4984       Move(f14, src2);
4985     }
4986   } else {
4987     if (kArchEndian == kLittle) {
4988       Move(a0, a1, src1);
4989       Move(a2, a3, src2);
4990     } else {
4991       Move(a1, a0, src1);
4992       Move(a3, a2, src2);
4993     }
4994   }
4995 }
4996 
4997 
4998 // -----------------------------------------------------------------------------
4999 // JavaScript invokes.
5000 
PrepareForTailCall(const ParameterCount & callee_args_count,Register caller_args_count_reg,Register scratch0,Register scratch1)5001 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
5002                                         Register caller_args_count_reg,
5003                                         Register scratch0, Register scratch1) {
5004 #if DEBUG
5005   if (callee_args_count.is_reg()) {
5006     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
5007                        scratch1));
5008   } else {
5009     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
5010   }
5011 #endif
5012 
5013   // Calculate the end of the destination area where we will put the
5014   // arguments after we drop the current frame. We add kPointerSize to count
5015   // the receiver argument, which is not included in the formal parameter count.
5016   Register dst_reg = scratch0;
5017   Lsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
5018   Addu(dst_reg, dst_reg,
5019        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
5020 
5021   Register src_reg = caller_args_count_reg;
5022   // Calculate the end of the source area. +kPointerSize is for the receiver.
5023   if (callee_args_count.is_reg()) {
5024     Lsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
5025     Addu(src_reg, src_reg, Operand(kPointerSize));
5026   } else {
5027     Addu(src_reg, sp,
5028          Operand((callee_args_count.immediate() + 1) * kPointerSize));
5029   }
5030 
5031   if (FLAG_debug_code) {
5032     Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
5033   }
5034 
5035   // Restore caller's frame pointer and return address now as they will be
5036   // overwritten by the copying loop.
5037   lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
5038   lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5039 
5040   // Now copy the callee arguments to the caller frame, going backwards to
5041   // avoid corrupting them (the source and destination areas could overlap).
5042 
5043   // Both src_reg and dst_reg point one word past the area to copy,
5044   // so they must be pre-decremented in the loop.
5045   Register tmp_reg = scratch1;
5046   Label loop, entry;
5047   Branch(&entry);
5048   bind(&loop);
5049   Subu(src_reg, src_reg, Operand(kPointerSize));
5050   Subu(dst_reg, dst_reg, Operand(kPointerSize));
5051   lw(tmp_reg, MemOperand(src_reg));
5052   sw(tmp_reg, MemOperand(dst_reg));
5053   bind(&entry);
5054   Branch(&loop, ne, sp, Operand(src_reg));
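  // The loop exits once src_reg has walked down to sp, i.e. after every callee
  // argument plus the receiver has been copied into the caller's frame.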
5055 
5056   // Leave current frame.
5057   mov(sp, dst_reg);
5058 }
5059 
InvokePrologue(const ParameterCount & expected,const ParameterCount & actual,Label * done,bool * definitely_mismatches,InvokeFlag flag,const CallWrapper & call_wrapper)5060 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
5061                                     const ParameterCount& actual,
5062                                     Label* done,
5063                                     bool* definitely_mismatches,
5064                                     InvokeFlag flag,
5065                                     const CallWrapper& call_wrapper) {
5066   bool definitely_matches = false;
5067   *definitely_mismatches = false;
5068   Label regular_invoke;
5069 
5070   // Check whether the expected and actual argument counts match. If not,
5071   // set up the registers according to the ArgumentsAdaptorTrampoline contract:
5072   //  a0: actual arguments count
5073   //  a1: function (passed through to callee)
5074   //  a2: expected arguments count
5075 
5076   // The code below is made a lot easier because the calling code already sets
5077   // up actual and expected registers according to the contract if values are
5078   // passed in registers.
5079   DCHECK(actual.is_immediate() || actual.reg().is(a0));
5080   DCHECK(expected.is_immediate() || expected.reg().is(a2));
5081 
5082   if (expected.is_immediate()) {
5083     DCHECK(actual.is_immediate());
5084     li(a0, Operand(actual.immediate()));
5085     if (expected.immediate() == actual.immediate()) {
5086       definitely_matches = true;
5087     } else {
5088       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
5089       if (expected.immediate() == sentinel) {
5090         // Don't worry about adapting arguments for builtins that
5091         // don't want that done. Skip the adaptation code by making it look
5092         // like we have a match between the expected and actual number of
5093         // arguments.
5094         definitely_matches = true;
5095       } else {
5096         *definitely_mismatches = true;
5097         li(a2, Operand(expected.immediate()));
5098       }
5099     }
5100   } else if (actual.is_immediate()) {
5101     li(a0, Operand(actual.immediate()));
5102     Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
5103   } else {
5104     Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
5105   }
5106 
5107   if (!definitely_matches) {
5108     Handle<Code> adaptor =
5109         isolate()->builtins()->ArgumentsAdaptorTrampoline();
5110     if (flag == CALL_FUNCTION) {
5111       call_wrapper.BeforeCall(CallSize(adaptor));
5112       Call(adaptor);
5113       call_wrapper.AfterCall();
5114       if (!*definitely_mismatches) {
5115         Branch(done);
5116       }
5117     } else {
5118       Jump(adaptor, RelocInfo::CODE_TARGET);
5119     }
5120     bind(&regular_invoke);
5121   }
5122 }
5123 
5124 
FloodFunctionIfStepping(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)5125 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
5126                                              const ParameterCount& expected,
5127                                              const ParameterCount& actual) {
5128   Label skip_flooding;
5129   ExternalReference last_step_action =
5130       ExternalReference::debug_last_step_action_address(isolate());
5131   STATIC_ASSERT(StepFrame > StepIn);
5132   li(t0, Operand(last_step_action));
5133   lb(t0, MemOperand(t0));
5134   Branch(&skip_flooding, lt, t0, Operand(StepIn));
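  // Flooding is only needed for step actions of StepIn and above (e.g.
  // StepFrame, per the assert); weaker actions skip straight past it.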
5135   {
5136     FrameScope frame(this,
5137                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
5138     if (expected.is_reg()) {
5139       SmiTag(expected.reg());
5140       Push(expected.reg());
5141     }
5142     if (actual.is_reg()) {
5143       SmiTag(actual.reg());
5144       Push(actual.reg());
5145     }
5146     if (new_target.is_valid()) {
5147       Push(new_target);
5148     }
5149     Push(fun);
5150     Push(fun);
5151     CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
5152     Pop(fun);
5153     if (new_target.is_valid()) {
5154       Pop(new_target);
5155     }
5156     if (actual.is_reg()) {
5157       Pop(actual.reg());
5158       SmiUntag(actual.reg());
5159     }
5160     if (expected.is_reg()) {
5161       Pop(expected.reg());
5162       SmiUntag(expected.reg());
5163     }
5164   }
5165   bind(&skip_flooding);
5166 }
5167 
5168 
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5169 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
5170                                         const ParameterCount& expected,
5171                                         const ParameterCount& actual,
5172                                         InvokeFlag flag,
5173                                         const CallWrapper& call_wrapper) {
5174   // You can't call a function without a valid frame.
5175   DCHECK(flag == JUMP_FUNCTION || has_frame());
5176   DCHECK(function.is(a1));
5177   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
5178 
5179   if (call_wrapper.NeedsDebugStepCheck()) {
5180     FloodFunctionIfStepping(function, new_target, expected, actual);
5181   }
5182 
5183   // Clear the new.target register if not given.
5184   if (!new_target.is_valid()) {
5185     LoadRoot(a3, Heap::kUndefinedValueRootIndex);
5186   }
5187 
5188   Label done;
5189   bool definitely_mismatches = false;
5190   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
5191                  call_wrapper);
5192   if (!definitely_mismatches) {
5193     // We call indirectly through the code field in the function to
5194     // allow recompilation to take effect without changing any of the
5195     // call sites.
5196     Register code = t0;
5197     lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5198     if (flag == CALL_FUNCTION) {
5199       call_wrapper.BeforeCall(CallSize(code));
5200       Call(code);
5201       call_wrapper.AfterCall();
5202     } else {
5203       DCHECK(flag == JUMP_FUNCTION);
5204       Jump(code);
5205     }
5206     // Continue here if InvokePrologue handled the invocation itself (via the
5207     // arguments adaptor) because of mismatched parameter counts.
5208     bind(&done);
5209   }
5210 }
5211 
5212 
InvokeFunction(Register function,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5213 void MacroAssembler::InvokeFunction(Register function,
5214                                     Register new_target,
5215                                     const ParameterCount& actual,
5216                                     InvokeFlag flag,
5217                                     const CallWrapper& call_wrapper) {
5218   // You can't call a function without a valid frame.
5219   DCHECK(flag == JUMP_FUNCTION || has_frame());
5220 
5221   // Contract with called JS functions requires that function is passed in a1.
5222   DCHECK(function.is(a1));
5223   Register expected_reg = a2;
5224   Register temp_reg = t0;
5225 
5226   lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5227   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5228   lw(expected_reg,
5229      FieldMemOperand(temp_reg,
5230                      SharedFunctionInfo::kFormalParameterCountOffset));
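  // The formal parameter count is stored as a smi; the arithmetic shift below
  // untags it into a plain integer expected-argument count.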
5231   sra(expected_reg, expected_reg, kSmiTagSize);
5232 
5233   ParameterCount expected(expected_reg);
5234   InvokeFunctionCode(function, new_target, expected, actual, flag,
5235                      call_wrapper);
5236 }
5237 
5238 
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5239 void MacroAssembler::InvokeFunction(Register function,
5240                                     const ParameterCount& expected,
5241                                     const ParameterCount& actual,
5242                                     InvokeFlag flag,
5243                                     const CallWrapper& call_wrapper) {
5244   // You can't call a function without a valid frame.
5245   DCHECK(flag == JUMP_FUNCTION || has_frame());
5246 
5247   // Contract with called JS functions requires that function is passed in a1.
5248   DCHECK(function.is(a1));
5249 
5250   // Get the function and setup the context.
5251   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5252 
5253   InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
5254 }
5255 
5256 
InvokeFunction(Handle<JSFunction> function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5257 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
5258                                     const ParameterCount& expected,
5259                                     const ParameterCount& actual,
5260                                     InvokeFlag flag,
5261                                     const CallWrapper& call_wrapper) {
5262   li(a1, function);
5263   InvokeFunction(a1, expected, actual, flag, call_wrapper);
5264 }
5265 
5266 
IsObjectJSStringType(Register object,Register scratch,Label * fail)5267 void MacroAssembler::IsObjectJSStringType(Register object,
5268                                           Register scratch,
5269                                           Label* fail) {
5270   DCHECK(kNotStringTag != 0);
5271 
5272   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5273   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5274   And(scratch, scratch, Operand(kIsNotStringMask));
5275   Branch(fail, ne, scratch, Operand(zero_reg));
5276 }
5277 
5278 
IsObjectNameType(Register object,Register scratch,Label * fail)5279 void MacroAssembler::IsObjectNameType(Register object,
5280                                       Register scratch,
5281                                       Label* fail) {
5282   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5283   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5284   Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
5285 }
5286 
5287 
5288 // ---------------------------------------------------------------------------
5289 // Support functions.
5290 
5291 
GetMapConstructor(Register result,Register map,Register temp,Register temp2)5292 void MacroAssembler::GetMapConstructor(Register result, Register map,
5293                                        Register temp, Register temp2) {
5294   Label done, loop;
5295   lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
5296   bind(&loop);
5297   JumpIfSmi(result, &done);
5298   GetObjectType(result, temp, temp2);
5299   Branch(&done, ne, temp2, Operand(MAP_TYPE));
5300   lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
5301   Branch(&loop);
5302   bind(&done);
5303 }
5304 
5305 
TryGetFunctionPrototype(Register function,Register result,Register scratch,Label * miss)5306 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
5307                                              Register scratch, Label* miss) {
5308   // Get the prototype or initial map from the function.
5309   lw(result,
5310      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5311 
5312   // If the prototype or initial map is the hole, don't return it and
5313   // simply miss the cache instead. This will allow us to allocate a
5314   // prototype object on-demand in the runtime system.
5315   LoadRoot(t8, Heap::kTheHoleValueRootIndex);
5316   Branch(miss, eq, result, Operand(t8));
5317 
5318   // If the function does not have an initial map, we're done.
5319   Label done;
5320   GetObjectType(result, scratch, scratch);
5321   Branch(&done, ne, scratch, Operand(MAP_TYPE));
5322 
5323   // Get the prototype from the initial map.
5324   lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
5325 
5326   // All done.
5327   bind(&done);
5328 }
5329 
5330 
GetObjectType(Register object,Register map,Register type_reg)5331 void MacroAssembler::GetObjectType(Register object,
5332                                    Register map,
5333                                    Register type_reg) {
5334   lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
5335   lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5336 }
5337 
5338 
5339 // -----------------------------------------------------------------------------
5340 // Runtime calls.
5341 
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)5342 void MacroAssembler::CallStub(CodeStub* stub,
5343                               TypeFeedbackId ast_id,
5344                               Condition cond,
5345                               Register r1,
5346                               const Operand& r2,
5347                               BranchDelaySlot bd) {
5348   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
5349   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
5350        cond, r1, r2, bd);
5351 }
5352 
5353 
TailCallStub(CodeStub * stub,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)5354 void MacroAssembler::TailCallStub(CodeStub* stub,
5355                                   Condition cond,
5356                                   Register r1,
5357                                   const Operand& r2,
5358                                   BranchDelaySlot bd) {
5359   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
5360 }
5361 
5362 
AllowThisStubCall(CodeStub * stub)5363 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
5364   return has_frame_ || !stub->SometimesSetsUpAFrame();
5365 }
5366 
5367 
IndexFromHash(Register hash,Register index)5368 void MacroAssembler::IndexFromHash(Register hash, Register index) {
5369   // If the hash field contains an array index, pick it out. The assert checks
5370   // that the constants for the maximum number of digits for an array index
5371   // cached in the hash field and the number of bits reserved for it do not
5372   // conflict.
5373   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
5374          (1 << String::kArrayIndexValueBits));
5375   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
5376 }
5377 
5378 
ObjectToDoubleFPURegister(Register object,FPURegister result,Register scratch1,Register scratch2,Register heap_number_map,Label * not_number,ObjectToDoubleFlags flags)5379 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
5380                                                FPURegister result,
5381                                                Register scratch1,
5382                                                Register scratch2,
5383                                                Register heap_number_map,
5384                                                Label* not_number,
5385                                                ObjectToDoubleFlags flags) {
5386   Label done;
5387   if ((flags & OBJECT_NOT_SMI) == 0) {
5388     Label not_smi;
5389     JumpIfNotSmi(object, &not_smi);
5390     // Remove smi tag and convert to double.
5391     sra(scratch1, object, kSmiTagSize);
5392     mtc1(scratch1, result);
5393     cvt_d_w(result, result);
5394     Branch(&done);
5395     bind(&not_smi);
5396   }
5397   // Check for heap number and load double value from it.
5398   lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
5399   Branch(not_number, ne, scratch1, Operand(heap_number_map));
5400 
5401   if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
5402     // If exponent is all ones the number is either a NaN or +/-Infinity.
5403     Register exponent = scratch1;
5404     Register mask_reg = scratch2;
5405     lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
5406     li(mask_reg, HeapNumber::kExponentMask);
5407 
5408     And(exponent, exponent, mask_reg);
5409     Branch(not_number, eq, exponent, Operand(mask_reg));
5410   }
5411   ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5412   bind(&done);
5413 }
5414 
5415 
SmiToDoubleFPURegister(Register smi,FPURegister value,Register scratch1)5416 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5417                                             FPURegister value,
5418                                             Register scratch1) {
5419   sra(scratch1, smi, kSmiTagSize);
5420   mtc1(scratch1, value);
5421   cvt_d_w(value, value);
5422 }
5423 
5424 
BranchOvfHelper(MacroAssembler * masm,Register overflow_dst,Label * overflow_label,Label * no_overflow_label)5425 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5426                                    Label* overflow_label,
5427                                    Label* no_overflow_label) {
5428   DCHECK(overflow_label || no_overflow_label);
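  // Callers leave the overflow condition in the sign bit of overflow_dst: it is
  // negative exactly when the preceding operation overflowed, so one signed
  // comparison against zero selects the branch.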
5429   if (!overflow_label) {
5430     DCHECK(no_overflow_label);
5431     masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5432   } else {
5433     masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5434     if (no_overflow_label) masm->Branch(no_overflow_label);
5435   }
5436 }
5437 
5438 
AddBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5439 void MacroAssembler::AddBranchOvf(Register dst, Register left,
5440                                   const Operand& right, Label* overflow_label,
5441                                   Label* no_overflow_label, Register scratch) {
5442   if (right.is_reg()) {
5443     AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5444                  scratch);
5445   } else {
5446     if (IsMipsArchVariant(kMips32r6)) {
5447       Register right_reg = t9;
5448       DCHECK(!left.is(right_reg));
5449       li(right_reg, Operand(right));
5450       AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5451     } else {
5452       Register overflow_dst = t9;
5453       DCHECK(!dst.is(scratch));
5454       DCHECK(!dst.is(overflow_dst));
5455       DCHECK(!scratch.is(overflow_dst));
5456       DCHECK(!left.is(overflow_dst));
5457       if (dst.is(left)) {
5458         mov(scratch, left);                  // Preserve left.
5459         Addu(dst, left, right.immediate());  // Left is overwritten.
5460         xor_(scratch, dst, scratch);         // Original left.
5461         // Load right since xori takes uint16 as immediate.
5462         Addu(overflow_dst, zero_reg, right);
5463         xor_(overflow_dst, dst, overflow_dst);
5464         and_(overflow_dst, overflow_dst, scratch);
5465       } else {
5466         Addu(dst, left, right.immediate());
5467         xor_(overflow_dst, dst, left);
5468         // Load right since xori takes uint16 as immediate.
5469         Addu(scratch, zero_reg, right);
5470         xor_(scratch, dst, scratch);
5471         and_(overflow_dst, scratch, overflow_dst);
5472       }
5473       BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5474     }
5475   }
5476 }
5477 
5478 
AddBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5479 void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5480                                   Label* overflow_label,
5481                                   Label* no_overflow_label, Register scratch) {
5482   if (IsMipsArchVariant(kMips32r6)) {
5483     if (!overflow_label) {
5484       DCHECK(no_overflow_label);
5485       DCHECK(!dst.is(scratch));
5486       Register left_reg = left.is(dst) ? scratch : left;
5487       Register right_reg = right.is(dst) ? t9 : right;
5488       DCHECK(!dst.is(left_reg));
5489       DCHECK(!dst.is(right_reg));
5490       Move(left_reg, left);
5491       Move(right_reg, right);
5492       addu(dst, left, right);
5493       bnvc(left_reg, right_reg, no_overflow_label);
5494     } else {
5495       bovc(left, right, overflow_label);
5496       addu(dst, left, right);
5497       if (no_overflow_label) bc(no_overflow_label);
5498     }
5499   } else {
5500     Register overflow_dst = t9;
5501     DCHECK(!dst.is(scratch));
5502     DCHECK(!dst.is(overflow_dst));
5503     DCHECK(!scratch.is(overflow_dst));
5504     DCHECK(!left.is(overflow_dst));
5505     DCHECK(!right.is(overflow_dst));
5506     DCHECK(!left.is(scratch));
5507     DCHECK(!right.is(scratch));
5508 
5509     if (left.is(right) && dst.is(left)) {
5510       mov(overflow_dst, right);
5511       right = overflow_dst;
5512     }
5513 
5514     if (dst.is(left)) {
5515       mov(scratch, left);           // Preserve left.
5516       addu(dst, left, right);       // Left is overwritten.
5517       xor_(scratch, dst, scratch);  // Original left.
5518       xor_(overflow_dst, dst, right);
5519       and_(overflow_dst, overflow_dst, scratch);
5520     } else if (dst.is(right)) {
5521       mov(scratch, right);          // Preserve right.
5522       addu(dst, left, right);       // Right is overwritten.
5523       xor_(scratch, dst, scratch);  // Original right.
5524       xor_(overflow_dst, dst, left);
5525       and_(overflow_dst, overflow_dst, scratch);
5526     } else {
5527       addu(dst, left, right);
5528       xor_(overflow_dst, dst, left);
5529       xor_(scratch, dst, right);
5530       and_(overflow_dst, scratch, overflow_dst);
5531     }
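    // Signed addition overflows exactly when both operands have the same sign
    // and the result's sign differs, i.e. (dst ^ left) & (dst ^ right) is
    // negative. E.g. 0x7fffffff + 1 = 0x80000000: both XORs have the sign bit
    // set, so overflow_dst is negative.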
5532     BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5533   }
5534 }
5535 
5536 
SubBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5537 void MacroAssembler::SubBranchOvf(Register dst, Register left,
5538                                   const Operand& right, Label* overflow_label,
5539                                   Label* no_overflow_label, Register scratch) {
5540   DCHECK(overflow_label || no_overflow_label);
5541   if (right.is_reg()) {
5542     SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5543                  scratch);
5544   } else {
5545     Register overflow_dst = t9;
5546     DCHECK(!dst.is(scratch));
5547     DCHECK(!dst.is(overflow_dst));
5548     DCHECK(!scratch.is(overflow_dst));
5549     DCHECK(!left.is(overflow_dst));
5550     DCHECK(!left.is(scratch));
5551     if (dst.is(left)) {
5552       mov(scratch, left);                      // Preserve left.
5553       Subu(dst, left, right.immediate());      // Left is overwritten.
5554       // Load right since xori takes uint16 as immediate.
5555       Addu(overflow_dst, zero_reg, right);
5556       xor_(overflow_dst, scratch, overflow_dst);  // scratch is original left.
5557       xor_(scratch, dst, scratch);                // scratch is original left.
5558       and_(overflow_dst, scratch, overflow_dst);
5559     } else {
5560       Subu(dst, left, right);
5561       xor_(overflow_dst, dst, left);
5562       // Load right since xori takes uint16 as immediate.
5563       Addu(scratch, zero_reg, right);
5564       xor_(scratch, left, scratch);
5565       and_(overflow_dst, scratch, overflow_dst);
5566     }
5567     BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5568   }
5569 }
5570 
5571 
SubBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5572 void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5573                                   Label* overflow_label,
5574                                   Label* no_overflow_label, Register scratch) {
5575   DCHECK(overflow_label || no_overflow_label);
5576   Register overflow_dst = t9;
5577   DCHECK(!dst.is(scratch));
5578   DCHECK(!dst.is(overflow_dst));
5579   DCHECK(!scratch.is(overflow_dst));
5580   DCHECK(!overflow_dst.is(left));
5581   DCHECK(!overflow_dst.is(right));
5582   DCHECK(!scratch.is(left));
5583   DCHECK(!scratch.is(right));
5584 
5585   // This happens with some Crankshaft code. Since Subu works fine if
5586   // left == right, let's not make that restriction here.
5587   if (left.is(right)) {
5588     mov(dst, zero_reg);
5589     if (no_overflow_label) {
5590       Branch(no_overflow_label);
5591     }
5592   }
5593 
5594   if (dst.is(left)) {
5595     mov(scratch, left);  // Preserve left.
5596     subu(dst, left, right);  // Left is overwritten.
5597     xor_(overflow_dst, dst, scratch);  // scratch is original left.
5598     xor_(scratch, scratch, right);  // scratch is original left.
5599     and_(overflow_dst, scratch, overflow_dst);
5600   } else if (dst.is(right)) {
5601     mov(scratch, right);  // Preserve right.
5602     subu(dst, left, right);  // Right is overwritten.
5603     xor_(overflow_dst, dst, left);
5604     xor_(scratch, left, scratch);  // Original right.
5605     and_(overflow_dst, scratch, overflow_dst);
5606   } else {
5607     subu(dst, left, right);
5608     xor_(overflow_dst, dst, left);
5609     xor_(scratch, left, right);
5610     and_(overflow_dst, scratch, overflow_dst);
5611   }
5612   BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5613 }
5614 
5615 
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles,BranchDelaySlot bd)5616 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5617                                  SaveFPRegsMode save_doubles,
5618                                  BranchDelaySlot bd) {
5619   // All parameters are on the stack. v0 has the return value after call.
5620 
5621   // If the expected number of arguments of the runtime function is
5622   // constant, we check that the actual number of arguments matches the
5623   // expectation.
5624   CHECK(f->nargs < 0 || f->nargs == num_arguments);
5625 
5626   // TODO(1236192): Most runtime routines don't need the number of
5627   // arguments passed in because it is constant. At some point we
5628   // should remove this need and make the runtime routine entry code
5629   // smarter.
5630   PrepareCEntryArgs(num_arguments);
5631   PrepareCEntryFunction(ExternalReference(f, isolate()));
5632   CEntryStub stub(isolate(), 1, save_doubles);
5633   CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5634 }
5635 
5636 
CallExternalReference(const ExternalReference & ext,int num_arguments,BranchDelaySlot bd)5637 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5638                                            int num_arguments,
5639                                            BranchDelaySlot bd) {
5640   PrepareCEntryArgs(num_arguments);
5641   PrepareCEntryFunction(ext);
5642 
5643   CEntryStub stub(isolate(), 1);
5644   CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5645 }
5646 
5647 
TailCallRuntime(Runtime::FunctionId fid)5648 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5649   const Runtime::Function* function = Runtime::FunctionForId(fid);
5650   DCHECK_EQ(1, function->result_size);
5651   if (function->nargs >= 0) {
5652     PrepareCEntryArgs(function->nargs);
5653   }
5654   JumpToExternalReference(ExternalReference(fid, isolate()));
5655 }
5656 
5657 
JumpToExternalReference(const ExternalReference & builtin,BranchDelaySlot bd)5658 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5659                                              BranchDelaySlot bd) {
5660   PrepareCEntryFunction(builtin);
5661   CEntryStub stub(isolate(), 1);
5662   Jump(stub.GetCode(),
5663        RelocInfo::CODE_TARGET,
5664        al,
5665        zero_reg,
5666        Operand(zero_reg),
5667        bd);
5668 }
5669 
5670 
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5671 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5672                                 Register scratch1, Register scratch2) {
5673   if (FLAG_native_code_counters && counter->Enabled()) {
5674     li(scratch1, Operand(value));
5675     li(scratch2, Operand(ExternalReference(counter)));
5676     sw(scratch1, MemOperand(scratch2));
5677   }
5678 }
5679 
5680 
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5681 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5682                                       Register scratch1, Register scratch2) {
5683   DCHECK(value > 0);
5684   if (FLAG_native_code_counters && counter->Enabled()) {
5685     li(scratch2, Operand(ExternalReference(counter)));
5686     lw(scratch1, MemOperand(scratch2));
5687     Addu(scratch1, scratch1, Operand(value));
5688     sw(scratch1, MemOperand(scratch2));
5689   }
5690 }
5691 
5692 
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5693 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5694                                       Register scratch1, Register scratch2) {
5695   DCHECK(value > 0);
5696   if (FLAG_native_code_counters && counter->Enabled()) {
5697     li(scratch2, Operand(ExternalReference(counter)));
5698     lw(scratch1, MemOperand(scratch2));
5699     Subu(scratch1, scratch1, Operand(value));
5700     sw(scratch1, MemOperand(scratch2));
5701   }
5702 }
5703 
5704 
5705 // -----------------------------------------------------------------------------
5706 // Debugging.
5707 
Assert(Condition cc,BailoutReason reason,Register rs,Operand rt)5708 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5709                             Register rs, Operand rt) {
5710   if (emit_debug_code())
5711     Check(cc, reason, rs, rt);
5712 }
5713 
5714 
AssertFastElements(Register elements)5715 void MacroAssembler::AssertFastElements(Register elements) {
5716   if (emit_debug_code()) {
5717     DCHECK(!elements.is(at));
5718     Label ok;
5719     push(elements);
5720     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5721     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5722     Branch(&ok, eq, elements, Operand(at));
5723     LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5724     Branch(&ok, eq, elements, Operand(at));
5725     LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5726     Branch(&ok, eq, elements, Operand(at));
5727     Abort(kJSObjectWithFastElementsMapHasSlowElements);
5728     bind(&ok);
5729     pop(elements);
5730   }
5731 }
5732 
5733 
Check(Condition cc,BailoutReason reason,Register rs,Operand rt)5734 void MacroAssembler::Check(Condition cc, BailoutReason reason,
5735                            Register rs, Operand rt) {
5736   Label L;
5737   Branch(&L, cc, rs, rt);
5738   Abort(reason);
5739   // Will not return here.
5740   bind(&L);
5741 }
5742 
5743 
Abort(BailoutReason reason)5744 void MacroAssembler::Abort(BailoutReason reason) {
5745   Label abort_start;
5746   bind(&abort_start);
5747 #ifdef DEBUG
5748   const char* msg = GetBailoutReason(reason);
5749   if (msg != NULL) {
5750     RecordComment("Abort message: ");
5751     RecordComment(msg);
5752   }
5753 
5754   if (FLAG_trap_on_abort) {
5755     stop(msg);
5756     return;
5757   }
5758 #endif
5759 
5760   li(a0, Operand(Smi::FromInt(reason)));
5761   push(a0);
5762   // Disable stub call restrictions to always allow calls to abort.
5763   if (!has_frame_) {
5764     // We don't actually want to generate a pile of code for this, so just
5765     // claim there is a stack frame, without generating one.
5766     FrameScope scope(this, StackFrame::NONE);
5767     CallRuntime(Runtime::kAbort);
5768   } else {
5769     CallRuntime(Runtime::kAbort);
5770   }
5771   // Will not return here.
5772   if (is_trampoline_pool_blocked()) {
5773     // If the calling code cares about the exact number of
5774     // instructions generated, we insert padding here to keep the size
5775     // of the Abort macro constant.
5776     // Currently in debug mode with debug_code enabled the number of
5777     // generated instructions is 10, so we use this as a maximum value.
5778     static const int kExpectedAbortInstructions = 10;
5779     int abort_instructions = InstructionsGeneratedSince(&abort_start);
5780     DCHECK(abort_instructions <= kExpectedAbortInstructions);
5781     while (abort_instructions++ < kExpectedAbortInstructions) {
5782       nop();
5783     }
5784   }
5785 }
5786 
5787 
LoadContext(Register dst,int context_chain_length)5788 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5789   if (context_chain_length > 0) {
5790     // Move up the chain of contexts to the context containing the slot.
5791     lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5792     for (int i = 1; i < context_chain_length; i++) {
5793       lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5794     }
5795   } else {
5796     // Slot is in the current function context. Move it into the
5797     // destination register in case we store into it (the write barrier
5798     // cannot be allowed to destroy the context register cp).
5799     Move(dst, cp);
5800   }
5801 }
5802 
5803 
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)5804 void MacroAssembler::LoadTransitionedArrayMapConditional(
5805     ElementsKind expected_kind,
5806     ElementsKind transitioned_kind,
5807     Register map_in_out,
5808     Register scratch,
5809     Label* no_map_match) {
5810   DCHECK(IsFastElementsKind(expected_kind));
5811   DCHECK(IsFastElementsKind(transitioned_kind));
5812 
5813   // Check that the function's map is the same as the expected cached map.
5814   lw(scratch, NativeContextMemOperand());
5815   lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
5816   Branch(no_map_match, ne, map_in_out, Operand(at));
5817 
5818   // Use the transitioned cached map.
5819   lw(map_in_out,
5820      ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
5821 }
5822 
5823 
5824 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5825   lw(dst, NativeContextMemOperand());
5826   lw(dst, ContextMemOperand(dst, index));
5827 }
5828 
5829 
5830 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5831                                                   Register map,
5832                                                   Register scratch) {
5833   // Load the initial map. The global functions all have initial maps.
5834   lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5835   if (emit_debug_code()) {
5836     Label ok, fail;
5837     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5838     Branch(&ok);
5839     bind(&fail);
5840     Abort(kGlobalFunctionsMustHaveInitialMap);
5841     bind(&ok);
5842   }
5843 }
5844 
5845 void MacroAssembler::StubPrologue(StackFrame::Type type) {
5846   li(at, Operand(Smi::FromInt(type)));
5847   PushCommonFrame(at);
5848 }
5849 
5850 
5851 void MacroAssembler::Prologue(bool code_pre_aging) {
5852   PredictableCodeSizeScope predictible_code_size_scope(
5853       this, kNoCodeAgeSequenceLength);
5854   // The following three instructions must remain together and unmodified
5855   // for code aging to work properly.
5856   if (code_pre_aging) {
5857     // Pre-age the code.
5858     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
5859     nop(Assembler::CODE_AGE_MARKER_NOP);
5860     // Load the stub address to t9 and call it,
5861     // GetCodeAgeAndParity() extracts the stub address from this instruction.
5862     li(t9,
5863        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
5864        CONSTANT_SIZE);
5865     nop();  // Prevent jalr to jal optimization.
5866     jalr(t9, a0);
5867     nop();  // Branch delay slot nop.
5868     nop();  // Pad the empty space.
5869   } else {
5870     PushStandardFrame(a1);
5871     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5872   }
5873 }
5874 
5875 
5876 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
5877   lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5878   lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
5879   lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
5880 }
5881 
5882 
5883 void MacroAssembler::EnterFrame(StackFrame::Type type,
5884                                 bool load_constant_pool_pointer_reg) {
5885   // Out-of-line constant pool not implemented on mips.
5886   UNREACHABLE();
5887 }
5888 
5889 
5890 void MacroAssembler::EnterFrame(StackFrame::Type type) {
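  // Note: this builds the frame by saving ra, fp and a Smi-tagged frame type
  // marker (plus the code object for INTERNAL frames), then points fp at the
  // saved-fp slot.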
5891   int stack_offset, fp_offset;
5892   if (type == StackFrame::INTERNAL) {
5893     stack_offset = -4 * kPointerSize;
5894     fp_offset = 2 * kPointerSize;
5895   } else {
5896     stack_offset = -3 * kPointerSize;
5897     fp_offset = 1 * kPointerSize;
5898   }
5899   addiu(sp, sp, stack_offset);
5900   stack_offset = -stack_offset - kPointerSize;
5901   sw(ra, MemOperand(sp, stack_offset));
5902   stack_offset -= kPointerSize;
5903   sw(fp, MemOperand(sp, stack_offset));
5904   stack_offset -= kPointerSize;
5905   li(t9, Operand(Smi::FromInt(type)));
5906   sw(t9, MemOperand(sp, stack_offset));
5907   if (type == StackFrame::INTERNAL) {
5908     DCHECK_EQ(stack_offset, kPointerSize);
5909     li(t9, Operand(CodeObject()));
5910     sw(t9, MemOperand(sp, 0));
5911   } else {
5912     DCHECK_EQ(stack_offset, 0);
5913   }
5914   // Adjust FP to point to saved FP.
5915   Addu(fp, sp, Operand(fp_offset));
5916 }
5917 
5918 
5919 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5920   addiu(sp, fp, 2 * kPointerSize);
5921   lw(ra, MemOperand(fp, 1 * kPointerSize));
5922   lw(fp, MemOperand(fp, 0 * kPointerSize));
5923 }
5924 
5925 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
5926   // Set up the frame structure on the stack.
5927   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5928   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5929   STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5930 
5931   // This is how the stack will look:
5932   // fp + 2 (==kCallerSPDisplacement) - old stack's end
5933   // [fp + 1 (==kCallerPCOffset)] - saved old ra
5934   // [fp + 0 (==kCallerFPOffset)] - saved old fp
5935   // [fp - 1] - StackFrame::EXIT Smi
5936   // [fp - 2 (==kSPOffset)] - sp of the called function
5937   // [fp - 3 (==kCodeOffset)] - CodeObject
5938   // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5939   //   new stack (will contain saved ra)
5940 
5941   // Save registers and reserve room for saved entry sp and code object.
5942   addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
5943   sw(ra, MemOperand(sp, 4 * kPointerSize));
5944   sw(fp, MemOperand(sp, 3 * kPointerSize));
5945   li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
5946   sw(at, MemOperand(sp, 2 * kPointerSize));
5947   // Set up new frame pointer.
5948   addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
5949 
5950   if (emit_debug_code()) {
5951     sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5952   }
5953 
5954   // Accessed from ExitFrame::code_slot.
5955   li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5956   sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5957 
5958   // Save the frame pointer and the context in top.
5959   li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5960   sw(fp, MemOperand(t8));
5961   li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5962   sw(cp, MemOperand(t8));
5963 
5964   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5965   if (save_doubles) {
5966     // The stack must be aligned to 0 modulo 8 for stores with sdc1.
5967     DCHECK(kDoubleSize == frame_alignment);
5968     if (frame_alignment > 0) {
5969       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5970       And(sp, sp, Operand(-frame_alignment));  // Align stack.
5971     }
5972     int space = FPURegister::kMaxNumRegisters * kDoubleSize;
5973     Subu(sp, sp, Operand(space));
5974     // Remember: we only need to save every 2nd double FPU value.
5975     for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
5976       FPURegister reg = FPURegister::from_code(i);
5977       sdc1(reg, MemOperand(sp, i * kDoubleSize));
5978     }
5979   }
5980 
5981   // Reserve place for the return address, stack space and an optional slot
5982   // (used by the DirectCEntryStub to hold the return value if a struct is
5983   // returned) and align the frame preparing for calling the runtime function.
5984   DCHECK(stack_space >= 0);
5985   Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5986   if (frame_alignment > 0) {
5987     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5988     And(sp, sp, Operand(-frame_alignment));  // Align stack.
5989   }
5990 
5991   // Set the exit frame sp value to point just before the return address
5992   // location.
5993   addiu(at, sp, kPointerSize);
5994   sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5995 }
5996 
5997 
5998 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5999                                     bool restore_context, bool do_return,
6000                                     bool argument_count_is_length) {
6001   // Optionally restore all double registers.
6002   if (save_doubles) {
6003     // Remember: we only need to restore every 2nd double FPU value.
6004     lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
6005     for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
6006       FPURegister reg = FPURegister::from_code(i);
6007       ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
6008     }
6009   }
6010 
6011   // Clear top frame.
6012   li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6013   sw(zero_reg, MemOperand(t8));
6014 
6015   // Restore current context from top and clear it in debug mode.
6016   if (restore_context) {
6017     li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6018     lw(cp, MemOperand(t8));
6019   }
6020 #ifdef DEBUG
6021   li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6022   sw(a3, MemOperand(t8));
6023 #endif
6024 
6025   // Pop the arguments, restore registers, and return.
6026   mov(sp, fp);  // Respect ABI stack constraint.
6027   lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
6028   lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
6029 
6030   if (argument_count.is_valid()) {
6031     if (argument_count_is_length) {
6032       addu(sp, sp, argument_count);
6033     } else {
6034       Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
6035     }
6036   }
6037 
6038   if (do_return) {
6039     Ret(USE_DELAY_SLOT);
6040     // If returning, the instruction in the delay slot will be the addiu below.
6041   }
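  // Skip the two words holding the saved caller fp and pc
  // (== ExitFrameConstants::kCallerSPDisplacement).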
6042   addiu(sp, sp, 8);
6043 }
6044 
6045 
6046 void MacroAssembler::InitializeNewString(Register string,
6047                                          Register length,
6048                                          Heap::RootListIndex map_index,
6049                                          Register scratch1,
6050                                          Register scratch2) {
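  // Note: the length is stored Smi-tagged (shifted left by kSmiTagSize) and
  // the hash field starts out as String::kEmptyHashField.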
6051   sll(scratch1, length, kSmiTagSize);
6052   LoadRoot(scratch2, map_index);
6053   sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
6054   li(scratch1, Operand(String::kEmptyHashField));
6055   sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
6056   sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
6057 }
6058 
6059 
6060 int MacroAssembler::ActivationFrameAlignment() {
6061 #if V8_HOST_ARCH_MIPS
6062   // Running on the real platform. Use the alignment as mandated by the local
6063   // environment.
6064   // Note: This will break if we ever start generating snapshots on one Mips
6065   // platform for another Mips platform with a different alignment.
6066   return base::OS::ActivationFrameAlignment();
6067 #else  // V8_HOST_ARCH_MIPS
6068   // If we are using the simulator then we should always align to the expected
6069   // alignment. As the simulator is used to generate snapshots we do not know
6070   // if the target platform will need alignment, so this is controlled from a
6071   // flag.
6072   return FLAG_sim_stack_alignment;
6073 #endif  // V8_HOST_ARCH_MIPS
6074 }
6075 
6076 
6077 void MacroAssembler::AssertStackIsAligned() {
6078   if (emit_debug_code()) {
6079       const int frame_alignment = ActivationFrameAlignment();
6080       const int frame_alignment_mask = frame_alignment - 1;
6081 
6082       if (frame_alignment > kPointerSize) {
6083         Label alignment_as_expected;
6084         DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6085         andi(at, sp, frame_alignment_mask);
6086         Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6087         // Don't use Check here, as it will call Runtime_Abort, re-entering here.
6088         stop("Unexpected stack alignment");
6089         bind(&alignment_as_expected);
6090       }
6091     }
6092 }
6093 
6094 
6095 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
6096     Register reg,
6097     Register scratch,
6098     Label* not_power_of_two_or_zero) {
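  // reg - 1 is negative when reg is zero (caught by the first branch), and
  // reg & (reg - 1) is non-zero exactly when reg has more than one bit set.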
6099   Subu(scratch, reg, Operand(1));
6100   Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
6101          scratch, Operand(zero_reg));
6102   and_(at, scratch, reg);  // In the delay slot.
6103   Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
6104 }
6105 
6106 
6107 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
6108   DCHECK(!reg.is(overflow));
6109   mov(overflow, reg);  // Save original value.
6110   SmiTag(reg);
6111   xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
6112 }
6113 
6114 
6115 void MacroAssembler::SmiTagCheckOverflow(Register dst,
6116                                          Register src,
6117                                          Register overflow) {
6118   if (dst.is(src)) {
6119     // Fall back to slower case.
6120     SmiTagCheckOverflow(dst, overflow);
6121   } else {
6122     DCHECK(!dst.is(src));
6123     DCHECK(!dst.is(overflow));
6124     DCHECK(!src.is(overflow));
6125     SmiTag(dst, src);
6126     xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
6127   }
6128 }
6129 
6130 
6131 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
6132                                        Register src,
6133                                        Label* smi_case) {
6134   JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
6135   SmiUntag(dst, src);
6136 }
6137 
6138 
6139 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
6140                                           Register src,
6141                                           Label* non_smi_case) {
6142   JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
6143   SmiUntag(dst, src);
6144 }
6145 
6146 void MacroAssembler::JumpIfSmi(Register value,
6147                                Label* smi_label,
6148                                Register scratch,
6149                                BranchDelaySlot bd) {
6150   DCHECK_EQ(0, kSmiTag);
6151   andi(scratch, value, kSmiTagMask);
6152   Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
6153 }
6154 
6155 void MacroAssembler::JumpIfNotSmi(Register value,
6156                                   Label* not_smi_label,
6157                                   Register scratch,
6158                                   BranchDelaySlot bd) {
6159   DCHECK_EQ(0, kSmiTag);
6160   andi(scratch, value, kSmiTagMask);
6161   Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
6162 }
6163 
6164 
6165 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
6166                                       Register reg2,
6167                                       Label* on_not_both_smi) {
6168   STATIC_ASSERT(kSmiTag == 0);
6169   DCHECK_EQ(1, kSmiTagMask);
6170   or_(at, reg1, reg2);
6171   JumpIfNotSmi(at, on_not_both_smi);
6172 }
6173 
6174 
6175 void MacroAssembler::JumpIfEitherSmi(Register reg1,
6176                                      Register reg2,
6177                                      Label* on_either_smi) {
6178   STATIC_ASSERT(kSmiTag == 0);
6179   DCHECK_EQ(1, kSmiTagMask);
6180   // If either operand is a Smi (tag 0), the AND of the two also has tag 0.
6181   and_(at, reg1, reg2);
6182   JumpIfSmi(at, on_either_smi);
6183 }
6184 
6185 void MacroAssembler::AssertNotNumber(Register object) {
6186   if (emit_debug_code()) {
6187     STATIC_ASSERT(kSmiTag == 0);
6188     andi(at, object, kSmiTagMask);
6189     Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6190     GetObjectType(object, t8, t8);
6191     Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6192   }
6193 }
6194 
6195 void MacroAssembler::AssertNotSmi(Register object) {
6196   if (emit_debug_code()) {
6197     STATIC_ASSERT(kSmiTag == 0);
6198     andi(at, object, kSmiTagMask);
6199     Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6200   }
6201 }
6202 
6203 
6204 void MacroAssembler::AssertSmi(Register object) {
6205   if (emit_debug_code()) {
6206     STATIC_ASSERT(kSmiTag == 0);
6207     andi(at, object, kSmiTagMask);
6208     Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6209   }
6210 }
6211 
6212 
6213 void MacroAssembler::AssertString(Register object) {
6214   if (emit_debug_code()) {
6215     STATIC_ASSERT(kSmiTag == 0);
6216     SmiTst(object, t8);
6217     Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6218     GetObjectType(object, t8, t8);
6219     Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
6220   }
6221 }
6222 
6223 
6224 void MacroAssembler::AssertName(Register object) {
6225   if (emit_debug_code()) {
6226     STATIC_ASSERT(kSmiTag == 0);
6227     SmiTst(object, t8);
6228     Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6229     GetObjectType(object, t8, t8);
6230     Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6231   }
6232 }
6233 
6234 
6235 void MacroAssembler::AssertFunction(Register object) {
6236   if (emit_debug_code()) {
6237     STATIC_ASSERT(kSmiTag == 0);
6238     SmiTst(object, t8);
6239     Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6240     GetObjectType(object, t8, t8);
6241     Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6242   }
6243 }
6244 
6245 
6246 void MacroAssembler::AssertBoundFunction(Register object) {
6247   if (emit_debug_code()) {
6248     STATIC_ASSERT(kSmiTag == 0);
6249     SmiTst(object, t8);
6250     Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6251     GetObjectType(object, t8, t8);
6252     Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
6253   }
6254 }
6255 
6256 void MacroAssembler::AssertGeneratorObject(Register object) {
6257   if (emit_debug_code()) {
6258     STATIC_ASSERT(kSmiTag == 0);
6259     SmiTst(object, t8);
6260     Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
6261     GetObjectType(object, t8, t8);
6262     Check(eq, kOperandIsNotAGeneratorObject, t8,
6263           Operand(JS_GENERATOR_OBJECT_TYPE));
6264   }
6265 }
6266 
6267 void MacroAssembler::AssertReceiver(Register object) {
6268   if (emit_debug_code()) {
6269     STATIC_ASSERT(kSmiTag == 0);
6270     SmiTst(object, t8);
6271     Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6272     GetObjectType(object, t8, t8);
6273     Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6274   }
6275 }
6276 
6277 
6278 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6279                                                      Register scratch) {
6280   if (emit_debug_code()) {
6281     Label done_checking;
6282     AssertNotSmi(object);
6283     LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6284     Branch(&done_checking, eq, object, Operand(scratch));
6285     lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
6286     LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
6287     Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
6288     bind(&done_checking);
6289   }
6290 }
6291 
6292 
6293 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6294   if (emit_debug_code()) {
6295     DCHECK(!reg.is(at));
6296     LoadRoot(at, index);
6297     Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6298   }
6299 }
6300 
6301 
6302 void MacroAssembler::JumpIfNotHeapNumber(Register object,
6303                                          Register heap_number_map,
6304                                          Register scratch,
6305                                          Label* on_not_heap_number) {
6306   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6307   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6308   Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6309 }
6310 
6311 
6312 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6313     Register first, Register second, Register scratch1, Register scratch2,
6314     Label* failure) {
6315   // Test that both first and second are sequential one-byte strings.
6316   // Assume that they are non-smis.
6317   lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6318   lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6319   lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6320   lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6321 
6322   JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6323                                                  scratch2, failure);
6324 }
6325 
6326 
6327 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6328                                                            Register second,
6329                                                            Register scratch1,
6330                                                            Register scratch2,
6331                                                            Label* failure) {
6332   // Check that neither is a smi.
6333   STATIC_ASSERT(kSmiTag == 0);
6334   And(scratch1, first, Operand(second));
6335   JumpIfSmi(scratch1, failure);
6336   JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6337                                                scratch2, failure);
6338 }
6339 
6340 
6341 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6342     Register first, Register second, Register scratch1, Register scratch2,
6343     Label* failure) {
6344   const int kFlatOneByteStringMask =
6345       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6346   const int kFlatOneByteStringTag =
6347       kStringTag | kOneByteStringTag | kSeqStringTag;
6348   DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
6349   andi(scratch1, first, kFlatOneByteStringMask);
6350   Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6351   andi(scratch2, second, kFlatOneByteStringMask);
6352   Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6353 }
6354 
6355 
6356 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
6357                                                               Register scratch,
6358                                                               Label* failure) {
6359   const int kFlatOneByteStringMask =
6360       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6361   const int kFlatOneByteStringTag =
6362       kStringTag | kOneByteStringTag | kSeqStringTag;
6363   And(scratch, type, Operand(kFlatOneByteStringMask));
6364   Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
6365 }
6366 
6367 
6368 static const int kRegisterPassedArguments = 4;
6369 
6370 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6371                                               int num_double_arguments) {
6372   int stack_passed_words = 0;
6373   num_reg_arguments += 2 * num_double_arguments;
6374 
6375   // Up to four simple arguments are passed in registers a0..a3.
6376   if (num_reg_arguments > kRegisterPassedArguments) {
6377     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6378   }
6379   stack_passed_words += kCArgSlotCount;
6380   return stack_passed_words;
6381 }
6382 
6383 
6384 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6385                                                Register index,
6386                                                Register value,
6387                                                Register scratch,
6388                                                uint32_t encoding_mask) {
6389   Label is_object;
6390   SmiTst(string, at);
6391   Check(ne, kNonObject, at, Operand(zero_reg));
6392 
6393   lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
6394   lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6395 
6396   andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6397   li(scratch, Operand(encoding_mask));
6398   Check(eq, kUnexpectedStringType, at, Operand(scratch));
6399 
6400   // The index is assumed to come in untagged. Tag it to compare with the
6401   // string length without using a temp register; it is restored at the end
6402   // of this function.
6403   Label index_tag_ok, index_tag_bad;
6404   TrySmiTag(index, scratch, &index_tag_bad);
6405   Branch(&index_tag_ok);
6406   bind(&index_tag_bad);
6407   Abort(kIndexIsTooLarge);
6408   bind(&index_tag_ok);
6409 
6410   lw(at, FieldMemOperand(string, String::kLengthOffset));
6411   Check(lt, kIndexIsTooLarge, index, Operand(at));
6412 
6413   DCHECK(Smi::FromInt(0) == 0);
6414   Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6415 
6416   SmiUntag(index, index);
6417 }
6418 
6419 
6420 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6421                                           int num_double_arguments,
6422                                           Register scratch) {
6423   int frame_alignment = ActivationFrameAlignment();
6424 
6425   // Up to four simple arguments are passed in registers a0..a3.
6426   // Those four arguments must have reserved argument slots on the stack for
6427   // mips, even though those argument slots are not normally used.
6428   // Remaining arguments are pushed on the stack, above (higher address than)
6429   // the argument slots.
6430   int stack_passed_arguments = CalculateStackPassedWords(
6431       num_reg_arguments, num_double_arguments);
6432   if (frame_alignment > kPointerSize) {
6433     // Make stack end at alignment and make room for num_arguments - 4 words
6434     // and the original value of sp.
6435     mov(scratch, sp);
6436     Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6437     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6438     And(sp, sp, Operand(-frame_alignment));
6439     sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6440   } else {
6441     Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6442   }
6443 }
6444 
6445 
6446 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6447                                           Register scratch) {
6448   PrepareCallCFunction(num_reg_arguments, 0, scratch);
6449 }
6450 
6451 
6452 void MacroAssembler::CallCFunction(ExternalReference function,
6453                                    int num_reg_arguments,
6454                                    int num_double_arguments) {
6455   li(t8, Operand(function));
6456   CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6457 }
6458 
6459 
6460 void MacroAssembler::CallCFunction(Register function,
6461                                    int num_reg_arguments,
6462                                    int num_double_arguments) {
6463   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6464 }
6465 
6466 
6467 void MacroAssembler::CallCFunction(ExternalReference function,
6468                                    int num_arguments) {
6469   CallCFunction(function, num_arguments, 0);
6470 }
6471 
6472 
6473 void MacroAssembler::CallCFunction(Register function,
6474                                    int num_arguments) {
6475   CallCFunction(function, num_arguments, 0);
6476 }
6477 
6478 
6479 void MacroAssembler::CallCFunctionHelper(Register function,
6480                                          int num_reg_arguments,
6481                                          int num_double_arguments) {
6482   DCHECK(has_frame());
6483   // Make sure that the stack is aligned before calling a C function unless
6484   // running in the simulator. The simulator has its own alignment check which
6485   // provides more information.
6486   // The argument slots are presumed to have been set up by
6487   // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
6488 
6489 #if V8_HOST_ARCH_MIPS
6490   if (emit_debug_code()) {
6491     int frame_alignment = base::OS::ActivationFrameAlignment();
6492     int frame_alignment_mask = frame_alignment - 1;
6493     if (frame_alignment > kPointerSize) {
6494       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6495       Label alignment_as_expected;
6496       And(at, sp, Operand(frame_alignment_mask));
6497       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6498       // Don't use Check here, as it will call Runtime_Abort possibly
6499       // re-entering here.
6500       stop("Unexpected alignment in CallCFunction");
6501       bind(&alignment_as_expected);
6502     }
6503   }
6504 #endif  // V8_HOST_ARCH_MIPS
6505 
6506   // Just call directly. The function called cannot cause a GC, or
6507   // allow preemption, so the return address in the link register
6508   // stays correct.
6509 
6510   if (!function.is(t9)) {
6511     mov(t9, function);
6512     function = t9;
6513   }
6514 
6515   Call(function);
6516 
6517   int stack_passed_arguments = CalculateStackPassedWords(
6518       num_reg_arguments, num_double_arguments);
6519 
6520   if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6521     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6522   } else {
6523     Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6524   }
6525 }
6526 
6527 
6528 #undef BRANCH_ARGS_CHECK
6529 
6530 
6531 void MacroAssembler::CheckPageFlag(
6532     Register object,
6533     Register scratch,
6534     int mask,
6535     Condition cc,
6536     Label* condition_met) {
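  // Mask the object address down to its page start, load the page's flag
  // word from the MemoryChunk header, and branch on the requested flag bits.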
6537   And(scratch, object, Operand(~Page::kPageAlignmentMask));
6538   lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6539   And(scratch, scratch, Operand(mask));
6540   Branch(condition_met, cc, scratch, Operand(zero_reg));
6541 }
6542 
6543 
6544 void MacroAssembler::JumpIfBlack(Register object,
6545                                  Register scratch0,
6546                                  Register scratch1,
6547                                  Label* on_black) {
6548   HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
6549   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6550 }
6551 
6552 
6553 void MacroAssembler::HasColor(Register object,
6554                               Register bitmap_scratch,
6555                               Register mask_scratch,
6556                               Label* has_color,
6557                               int first_bit,
6558                               int second_bit) {
6559   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6560   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6561 
6562   GetMarkBits(object, bitmap_scratch, mask_scratch);
6563 
6564   Label other_color, word_boundary;
6565   lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6566   And(t8, t9, Operand(mask_scratch));
6567   Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6568   // Shift left 1 by adding.
6569   Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
6570   Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
6571   And(t8, t9, Operand(mask_scratch));
6572   Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6573   jmp(&other_color);
6574 
6575   bind(&word_boundary);
6576   lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
6577   And(t9, t9, Operand(1));
6578   Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
6579   bind(&other_color);
6580 }
6581 
6582 
6583 void MacroAssembler::GetMarkBits(Register addr_reg,
6584                                  Register bitmap_reg,
6585                                  Register mask_reg) {
6586   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
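  // bitmap_reg <- start of the page containing addr_reg, t8 <- index of the
  // bitmap cell for addr_reg, mask_reg <- a one-hot mask selecting the mark
  // bit within that cell.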
6587   And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6588   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6589   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6590   Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
6591   Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
6592   li(t8, Operand(1));
6593   sllv(mask_reg, t8, mask_reg);
6594 }
6595 
6596 
6597 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6598                                  Register mask_scratch, Register load_scratch,
6599                                  Label* value_is_white) {
6600   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6601   GetMarkBits(value, bitmap_scratch, mask_scratch);
6602 
6603   // If the value is black or grey we don't need to do anything.
6604   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
6605   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6606   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
6607   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6608 
6609   // Since both black and grey have a 1 in the first position and white does
6610   // not have a 1 there we only need to check one bit.
6611   lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6612   And(t8, mask_scratch, load_scratch);
6613   Branch(value_is_white, eq, t8, Operand(zero_reg));
6614 }
6615 
6616 
6617 void MacroAssembler::LoadInstanceDescriptors(Register map,
6618                                              Register descriptors) {
6619   lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6620 }
6621 
6622 
6623 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
6624   lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
6625   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6626 }
6627 
6628 
6629 void MacroAssembler::EnumLength(Register dst, Register map) {
6630   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
6631   lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
6632   And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6633   SmiTag(dst);
6634 }
6635 
6636 
6637 void MacroAssembler::LoadAccessor(Register dst, Register holder,
6638                                   int accessor_index,
6639                                   AccessorComponent accessor) {
6640   lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6641   LoadInstanceDescriptors(dst, dst);
6642   lw(dst,
6643      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6644   int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6645                                            : AccessorPair::kSetterOffset;
6646   lw(dst, FieldMemOperand(dst, offset));
6647 }
6648 
6649 
6650 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
6651   Register null_value = t1;
6652   Register empty_fixed_array_value = t2;
6653   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6654   Label next, start;
6655   mov(a2, a0);
6656 
6657   // Check if the enum length field is properly initialized, indicating that
6658   // there is an enum cache.
6659   lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6660 
6661   EnumLength(a3, a1);
6662   Branch(
6663       call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
6664 
6665   LoadRoot(null_value, Heap::kNullValueRootIndex);
6666   jmp(&start);
6667 
6668   bind(&next);
6669   lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6670 
6671   // For all objects but the receiver, check that the cache is empty.
6672   EnumLength(a3, a1);
6673   Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
6674 
6675   bind(&start);
6676 
6677   // Check that there are no elements. Register a2 contains the current JS
6678   // object we've reached through the prototype chain.
6679   Label no_elements;
6680   lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
6681   Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
6682 
6683   // Second chance, the object may be using the empty slow element dictionary.
6684   LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
6685   Branch(call_runtime, ne, a2, Operand(at));
6686 
6687   bind(&no_elements);
6688   lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
6689   Branch(&next, ne, a2, Operand(null_value));
6690 }
6691 
6692 
6693 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
6694   DCHECK(!output_reg.is(input_reg));
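  // Clamp to [0, 255]: values above 255 keep the preloaded 255, negative
  // values pick up the zero written in the delay slot, everything else is
  // copied through unchanged.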
6695   Label done;
6696   li(output_reg, Operand(255));
6697   // Normal branch: nop in delay slot.
6698   Branch(&done, gt, input_reg, Operand(output_reg));
6699   // Use delay slot in this branch.
6700   Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6701   mov(output_reg, zero_reg);  // In delay slot.
6702   mov(output_reg, input_reg);  // Value is in range 0..255.
6703   bind(&done);
6704 }
6705 
6706 
6707 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6708                                         DoubleRegister input_reg,
6709                                         DoubleRegister temp_double_reg) {
6710   Label above_zero;
6711   Label done;
6712   Label in_bounds;
6713 
6714   Move(temp_double_reg, 0.0);
6715   BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6716 
6717   // Double value is less than zero, NaN or Inf, return 0.
6718   mov(result_reg, zero_reg);
6719   Branch(&done);
6720 
6721   // Double value is >= 255, return 255.
6722   bind(&above_zero);
6723   Move(temp_double_reg, 255.0);
6724   BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6725   li(result_reg, Operand(255));
6726   Branch(&done);
6727 
6728   // In 0-255 range, round and truncate.
6729   bind(&in_bounds);
6730   cvt_w_d(temp_double_reg, input_reg);
6731   mfc1(result_reg, temp_double_reg);
6732   bind(&done);
6733 }
6734 
6735 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
6736                                                      Register scratch_reg,
6737                                                      Label* no_memento_found) {
6738   Label map_check;
6739   Label top_check;
6740   ExternalReference new_space_allocation_top_adr =
6741       ExternalReference::new_space_allocation_top_address(isolate());
6742   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
6743   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
6744 
6745   // Bail out if the object is not in new space.
6746   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
6747   // If the object is in new space, we need to check whether it is on the same
6748   // page as the current top.
6749   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
6750   li(at, Operand(new_space_allocation_top_adr));
6751   lw(at, MemOperand(at));
6752   Xor(scratch_reg, scratch_reg, Operand(at));
6753   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6754   Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
6755   // The object is on a different page than allocation top. Bail out if the
6756   // object sits on the page boundary as no memento can follow and we cannot
6757   // touch the memory following it.
6758   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
6759   Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
6760   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6761   Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
6762   // Continue with the actual map check.
6763   jmp(&map_check);
6764   // If top is on the same page as the current object, we need to check whether
6765   // we are below top.
6766   bind(&top_check);
6767   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
6768   li(at, Operand(new_space_allocation_top_adr));
6769   lw(at, MemOperand(at));
6770   Branch(no_memento_found, gt, scratch_reg, Operand(at));
6771   // Memento map check.
6772   bind(&map_check);
6773   lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
6774   Branch(no_memento_found, ne, scratch_reg,
6775          Operand(isolate()->factory()->allocation_memento_map()));
6776 }
6777 
6778 
6779 Register GetRegisterThatIsNotOneOf(Register reg1,
6780                                    Register reg2,
6781                                    Register reg3,
6782                                    Register reg4,
6783                                    Register reg5,
6784                                    Register reg6) {
6785   RegList regs = 0;
6786   if (reg1.is_valid()) regs |= reg1.bit();
6787   if (reg2.is_valid()) regs |= reg2.bit();
6788   if (reg3.is_valid()) regs |= reg3.bit();
6789   if (reg4.is_valid()) regs |= reg4.bit();
6790   if (reg5.is_valid()) regs |= reg5.bit();
6791   if (reg6.is_valid()) regs |= reg6.bit();
6792 
6793   const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
6794   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
6795     int code = config->GetAllocatableGeneralCode(i);
6796     Register candidate = Register::from_code(code);
6797     if (regs & candidate.bit()) continue;
6798     return candidate;
6799   }
6800   UNREACHABLE();
6801   return no_reg;
6802 }
6803 
6804 
6805 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
6806     Register object,
6807     Register scratch0,
6808     Register scratch1,
6809     Label* found) {
6810   DCHECK(!scratch1.is(scratch0));
6811   Factory* factory = isolate()->factory();
6812   Register current = scratch0;
6813   Label loop_again, end;
6814 
6815   // Walk up the prototype chain, starting from the object's map.
6816   Move(current, object);
6817   lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
6818   lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
6819   Branch(&end, eq, current, Operand(factory->null_value()));
6820 
6821   // Loop based on the map going up the prototype chain.
6822   bind(&loop_again);
6823   lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
6824   lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
6825   STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
6826   STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
6827   Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
6828   lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
6829   DecodeField<Map::ElementsKindBits>(scratch1);
6830   Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
6831   lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
6832   Branch(&loop_again, ne, current, Operand(factory->null_value()));
6833 
6834   bind(&end);
6835 }
6836 
6837 
6838 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
6839                 Register reg5, Register reg6, Register reg7, Register reg8,
6840                 Register reg9, Register reg10) {
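  // Compare the number of valid registers with the number of distinct
  // register codes; any aliasing makes the second count smaller.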
6841   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
6842                         reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
6843                         reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
6844                         reg10.is_valid();
6845 
6846   RegList regs = 0;
6847   if (reg1.is_valid()) regs |= reg1.bit();
6848   if (reg2.is_valid()) regs |= reg2.bit();
6849   if (reg3.is_valid()) regs |= reg3.bit();
6850   if (reg4.is_valid()) regs |= reg4.bit();
6851   if (reg5.is_valid()) regs |= reg5.bit();
6852   if (reg6.is_valid()) regs |= reg6.bit();
6853   if (reg7.is_valid()) regs |= reg7.bit();
6854   if (reg8.is_valid()) regs |= reg8.bit();
6855   if (reg9.is_valid()) regs |= reg9.bit();
6856   if (reg10.is_valid()) regs |= reg10.bit();
6857   int n_of_non_aliasing_regs = NumRegs(regs);
6858 
6859   return n_of_valid_regs != n_of_non_aliasing_regs;
6860 }
6861 
6862 
6863 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
6864                          FlushICache flush_cache)
6865     : address_(address),
6866       size_(instructions * Assembler::kInstrSize),
6867       masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
6868       flush_cache_(flush_cache) {
6869   // Create a new macro assembler pointing to the address of the code to patch.
6870   // The size is adjusted with kGap in order for the assembler to generate size
6871   // bytes of instructions without failing with buffer size constraints.
6872   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6873 }
6874 
6875 
6876 CodePatcher::~CodePatcher() {
6877   // Indicate that code has changed.
6878   if (flush_cache_ == FLUSH) {
6879     Assembler::FlushICache(masm_.isolate(), address_, size_);
6880   }
6881 
6882   // Check that the code was patched as expected.
6883   DCHECK(masm_.pc_ == address_ + size_);
6884   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6885 }
6886 
6887 
6888 void CodePatcher::Emit(Instr instr) {
6889   masm()->emit(instr);
6890 }
6891 
6892 
6893 void CodePatcher::Emit(Address addr) {
6894   masm()->emit(reinterpret_cast<Instr>(addr));
6895 }
6896 
6897 
6898 void CodePatcher::ChangeBranchCondition(Instr current_instr,
6899                                         uint32_t new_opcode) {
6900   current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
6901   masm_.emit(current_instr);
6902 }
6903 
6904 
6905 void MacroAssembler::TruncatingDiv(Register result,
6906                                    Register dividend,
6907                                    int32_t divisor) {
6908   DCHECK(!dividend.is(result));
6909   DCHECK(!dividend.is(at));
6910   DCHECK(!result.is(at));
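  // Signed division by a constant via a precomputed magic multiplier: take
  // the high word of dividend * multiplier, correct for a multiplier that
  // wrapped into the sign bit, arithmetic-shift, then add the dividend's
  // sign bit so the result rounds toward zero.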
6911   base::MagicNumbersForDivision<uint32_t> mag =
6912       base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6913   li(at, Operand(mag.multiplier));
6914   Mulh(result, dividend, Operand(at));
6915   bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6916   if (divisor > 0 && neg) {
6917     Addu(result, result, Operand(dividend));
6918   }
6919   if (divisor < 0 && !neg && mag.multiplier > 0) {
6920     Subu(result, result, Operand(dividend));
6921   }
6922   if (mag.shift > 0) sra(result, result, mag.shift);
6923   srl(at, dividend, 31);
6924   Addu(result, result, Operand(at));
6925 }
6926 
6927 
6928 }  // namespace internal
6929 }  // namespace v8
6930 
6931 #endif  // V8_TARGET_ARCH_MIPS
6932