1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
7 
8 #include "src/assembler.h"
9 #include "src/globals.h"
10 #include "src/mips/assembler-mips.h"
11 
12 namespace v8 {
13 namespace internal {
14 
15 // Give alias names to registers for calling conventions.
16 const Register kReturnRegister0 = {Register::kCode_v0};
17 const Register kReturnRegister1 = {Register::kCode_v1};
18 const Register kReturnRegister2 = {Register::kCode_a0};
19 const Register kJSFunctionRegister = {Register::kCode_a1};
20 const Register kContextRegister = {Register::kCpRegister};
21 const Register kAllocateSizeRegister = {Register::kCode_a0};
22 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
23 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
24 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
25 const Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
26 const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
27 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
28 const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
29 const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
30 
31 // Forward declaration.
32 class JumpTarget;
33 
34 // Reserved Register Usage Summary.
35 //
36 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
37 //
38 // The programmer should know that the MacroAssembler may clobber these three,
39 // but won't touch other registers except in special cases.
40 //
41 // Per the MIPS ABI, register t9 must be used for indirect function calls
42 // via the 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
43 // updating the gp register for position-independent code. Whenever MIPS
44 // generated code calls C code, it must do so via the t9 register.
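//
// Illustrative sketch (assumption, not taken from the original comments):
// a call into C code is typically materialized by loading the target into
// t9 and calling through it, e.g.
//   li(t9, Operand(function_address));  // 'function_address' is hypothetical
//   Call(t9);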
45 
46 
47 // Flags used for LeaveExitFrame function.
48 enum LeaveExitFrameMode {
49   EMIT_RETURN = true,
50   NO_EMIT_RETURN = false
51 };
52 
53 // Flags used for AllocateHeapNumber
54 enum TaggingMode {
55   // Tag the result.
56   TAG_RESULT,
57   // Don't tag
58   DONT_TAG_RESULT
59 };
60 
61 // Flags used for the ObjectToDoubleFPURegister function.
62 enum ObjectToDoubleFlags {
63   // No special flags.
64   NO_OBJECT_TO_DOUBLE_FLAGS = 0,
65   // Object is known to be a non smi.
66   OBJECT_NOT_SMI = 1 << 0,
67   // Don't load NaNs or infinities, branch to the non number case instead.
68   AVOID_NANS_AND_INFINITIES = 1 << 1
69 };
70 
71 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
72 enum BranchDelaySlot {
73   USE_DELAY_SLOT,
74   PROTECT
75 };
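
// Illustrative sketch (assumes the usual convention that with USE_DELAY_SLOT
// the next emitted instruction fills the delay slot):
//   Ret(USE_DELAY_SLOT);
//   mov(v0, zero_reg);  // executed in the branch delay slot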
76 
77 // Flags used for the li macro-assembler function.
78 enum LiFlags {
79   // If the constant value can be represented in just 16 bits, then
80   // optimize the li to use a single instruction, rather than lui/ori pair.
81   OPTIMIZE_SIZE = 0,
82   // Always use 2 instructions (lui/ori pair), even if the constant could
83   // be loaded with just one, so that this value is patchable later.
84   CONSTANT_SIZE = 1
85 };
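
// Illustrative sketch (not part of the original comments): choosing the
// size/patchability trade-off when loading a constant, e.g.
//   li(t0, Operand(0x1234), OPTIMIZE_SIZE);  // may shrink to one instruction
//   li(t1, Operand(0x1234), CONSTANT_SIZE);  // always lui/ori, patchable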
86 
87 
88 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
89 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
90 enum PointersToHereCheck {
91   kPointersToHereMaybeInteresting,
92   kPointersToHereAreAlwaysInteresting
93 };
94 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
95 
96 Register GetRegisterThatIsNotOneOf(Register reg1,
97                                    Register reg2 = no_reg,
98                                    Register reg3 = no_reg,
99                                    Register reg4 = no_reg,
100                                    Register reg5 = no_reg,
101                                    Register reg6 = no_reg);
102 
103 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
104                 Register reg4 = no_reg, Register reg5 = no_reg,
105                 Register reg6 = no_reg, Register reg7 = no_reg,
106                 Register reg8 = no_reg, Register reg9 = no_reg,
107                 Register reg10 = no_reg);
108 
109 
110 // -----------------------------------------------------------------------------
111 // Static helper functions.
112 
113 inline MemOperand ContextMemOperand(Register context, int index) {
114   return MemOperand(context, Context::SlotOffset(index));
115 }
116 
117 
118 inline MemOperand NativeContextMemOperand() {
119   return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
120 }
121 
122 
123 // Generate a MemOperand for loading a field from an object.
124 inline MemOperand FieldMemOperand(Register object, int offset) {
125   return MemOperand(object, offset - kHeapObjectTag);
126 }
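
// Illustrative use (not part of the original comments): loading a field
// through a tagged HeapObject pointer, e.g.
//   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));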
127 
128 
129 // Generate a MemOperand for storing arguments 5..N on the stack
130 // when calling CallCFunction().
131 inline MemOperand CFunctionArgumentOperand(int index) {
132   DCHECK(index > kCArgSlotCount);
133   // Argument 5 takes the slot just past the four Arg-slots.
134   int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
135   return MemOperand(sp, offset);
136 }
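
// Illustrative use (not part of the original comments): storing the fifth
// C argument before a CallCFunction call, e.g.
//   sw(t0, CFunctionArgumentOperand(5));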
137 
138 
139 // MacroAssembler implements a collection of frequently used macros.
140 class MacroAssembler: public Assembler {
141  public:
142   MacroAssembler(Isolate* isolate, void* buffer, int size,
143                  CodeObjectRequired create_code_object);
144 
145   // Arguments macros.
146 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
147 #define COND_ARGS cond, r1, r2
148 
149   // Cases when relocation is not needed.
150 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
151   void Name(target_type target, BranchDelaySlot bd = PROTECT); \
152   inline void Name(BranchDelaySlot bd, target_type target) { \
153     Name(target, bd); \
154   } \
155   void Name(target_type target, \
156             COND_TYPED_ARGS, \
157             BranchDelaySlot bd = PROTECT); \
158   inline void Name(BranchDelaySlot bd, \
159                    target_type target, \
160                    COND_TYPED_ARGS) { \
161     Name(target, COND_ARGS, bd); \
162   }
163 
164 #define DECLARE_BRANCH_PROTOTYPES(Name)   \
165   DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
166   DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
167 
168   DECLARE_BRANCH_PROTOTYPES(Branch)
169   DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
170   DECLARE_BRANCH_PROTOTYPES(BranchShort)
171 
172 #undef DECLARE_BRANCH_PROTOTYPES
173 #undef COND_TYPED_ARGS
174 #undef COND_ARGS
175 
176 
177   // Jump, Call, and Ret pseudo instructions implementing inter-working.
178 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
179   const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
180 
181   void Jump(Register target, COND_ARGS);
182   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
183   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
184   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
185   static int CallSize(Register target, COND_ARGS);
186   void Call(Register target, COND_ARGS);
187   static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
188   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
189   int CallSize(Handle<Code> code,
190                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
191                TypeFeedbackId ast_id = TypeFeedbackId::None(),
192                COND_ARGS);
193   void Call(Handle<Code> code,
194             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
195             TypeFeedbackId ast_id = TypeFeedbackId::None(),
196             COND_ARGS);
197   void Ret(COND_ARGS);
198   inline void Ret(BranchDelaySlot bd, Condition cond = al,
199     Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
200     Ret(cond, rs, rt, bd);
201   }
202 
203   bool IsNear(Label* L, Condition cond, int rs_reg);
204 
205   void Branch(Label* L,
206               Condition cond,
207               Register rs,
208               Heap::RootListIndex index,
209               BranchDelaySlot bdslot = PROTECT);
210 
211   // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
212   // functor/function with a 'Label* func(size_t index)' declaration.
213   template <typename Func>
214   void GenerateSwitchTable(Register index, size_t case_count,
215                            Func GetLabelFunction);
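  // Illustrative use (not part of the original comments; 'cases' and
  // 'kCaseCount' are hypothetical):
  //   GenerateSwitchTable(index, kCaseCount,
  //                       [&cases](size_t i) { return &cases[i]; });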
216 #undef COND_ARGS
217 
218   // Emit code that loads |parameter_index|'th parameter from the stack to
219   // the register according to the CallInterfaceDescriptor definition.
220   // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
221   // below the caller's sp.
222   template <class Descriptor>
223   void LoadParameterFromStack(
224       Register reg, typename Descriptor::ParameterIndices parameter_index,
225       int sp_to_ra_offset_in_words = 0) {
226     DCHECK(Descriptor::kPassLastArgsOnStack);
227     UNIMPLEMENTED();
228   }
229 
230   // Emit code to discard a non-negative number of pointer-sized elements
231   // from the stack, clobbering only the sp register.
232   void Drop(int count,
233             Condition cond = cc_always,
234             Register reg = no_reg,
235             const Operand& op = Operand(no_reg));
236 
237   // Trivial case of DropAndRet that utilizes the delay slot and only emits
238   // 2 instructions.
239   void DropAndRet(int drop);
240 
241   void DropAndRet(int drop,
242                   Condition cond,
243                   Register reg,
244                   const Operand& op);
245 
246   // Swap two registers.  If the scratch register is omitted then a slightly
247   // less efficient form using xor instead of mov is emitted.
248   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
249 
250   void Call(Label* target);
251 
252   inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
253   inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
254 
255   inline void Move(Register dst, Register src) {
256     if (!dst.is(src)) {
257       mov(dst, src);
258     }
259   }
260 
261   inline void Move_d(FPURegister dst, FPURegister src) {
262     if (!dst.is(src)) {
263       mov_d(dst, src);
264     }
265   }
266 
267   inline void Move_s(FPURegister dst, FPURegister src) {
268     if (!dst.is(src)) {
269       mov_s(dst, src);
270     }
271   }
272 
273   inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
274 
275   inline void Move(Register dst_low, Register dst_high, FPURegister src) {
276     mfc1(dst_low, src);
277     Mfhc1(dst_high, src);
278   }
279 
280   inline void FmoveHigh(Register dst_high, FPURegister src) {
281     Mfhc1(dst_high, src);
282   }
283 
284   inline void FmoveHigh(FPURegister dst, Register src_high) {
285     Mthc1(src_high, dst);
286   }
287 
288   inline void FmoveLow(Register dst_low, FPURegister src) {
289     mfc1(dst_low, src);
290   }
291 
292   void FmoveLow(FPURegister dst, Register src_low);
293 
294   inline void Move(FPURegister dst, Register src_low, Register src_high) {
295     mtc1(src_low, dst);
296     Mthc1(src_high, dst);
297   }
298 
299   void Move(FPURegister dst, float imm);
300   void Move(FPURegister dst, double imm);
301 
302   // Conditional move.
303   void Movz(Register rd, Register rs, Register rt);
304   void Movn(Register rd, Register rs, Register rt);
305   void Movt(Register rd, Register rs, uint16_t cc = 0);
306   void Movf(Register rd, Register rs, uint16_t cc = 0);
307 
308   // Min, Max macros.
309   // On pre-r6 these functions may modify at and t8 registers.
310   void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
311                      Label* nan = nullptr);
312   void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
313                      Label* nan = nullptr);
314   void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
315                      Label* nan = nullptr);
316   void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
317                      Label* nan = nullptr);
318 
319   void Clz(Register rd, Register rs);
320 
321   // Jump unconditionally to given label.
322   // We NEED a nop in the branch delay slot, as it is used by V8, for example in
323   // CodeGenerator::ProcessDeferred().
324   // Currently the branch delay slot is filled by the MacroAssembler.
325   // Prefer b(Label) for code generation.
326   void jmp(Label* L) {
327     Branch(L);
328   }
329 
330   void Load(Register dst, const MemOperand& src, Representation r);
331   void Store(Register src, const MemOperand& dst, Representation r);
332 
333   void PushRoot(Heap::RootListIndex index) {
334     LoadRoot(at, index);
335     Push(at);
336   }
337 
338   // Compare the object in a register to a value and jump if they are equal.
339   void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
340     LoadRoot(at, index);
341     Branch(if_equal, eq, with, Operand(at));
342   }
343 
344   // Compare the object in a register to a value and jump if they are not equal.
345   void JumpIfNotRoot(Register with, Heap::RootListIndex index,
346                      Label* if_not_equal) {
347     LoadRoot(at, index);
348     Branch(if_not_equal, ne, with, Operand(at));
349   }
350 
351   // Load an object from the root table.
352   void LoadRoot(Register destination,
353                 Heap::RootListIndex index);
354   void LoadRoot(Register destination,
355                 Heap::RootListIndex index,
356                 Condition cond, Register src1, const Operand& src2);
357 
358   // Store an object to the root table.
359   void StoreRoot(Register source,
360                  Heap::RootListIndex index);
361   void StoreRoot(Register source,
362                  Heap::RootListIndex index,
363                  Condition cond, Register src1, const Operand& src2);
364 
365   // ---------------------------------------------------------------------------
366   // GC Support
367 
368   void IncrementalMarkingRecordWriteHelper(Register object,
369                                            Register value,
370                                            Register address);
371 
372   enum RememberedSetFinalAction {
373     kReturnAtEnd,
374     kFallThroughAtEnd
375   };
376 
377 
378   // Record in the remembered set the fact that we have a pointer to new space
379   // at the address pointed to by the addr register.  Only works if addr is not
380   // in new space.
381   void RememberedSetHelper(Register object,  // Used for debug code.
382                            Register addr,
383                            Register scratch,
384                            SaveFPRegsMode save_fp,
385                            RememberedSetFinalAction and_then);
386 
387   void CheckPageFlag(Register object,
388                      Register scratch,
389                      int mask,
390                      Condition cc,
391                      Label* condition_met);
392 
393   // Check if object is in new space.  Jumps if the object is not in new space.
394   // The register scratch can be object itself, but it will be clobbered.
395   void JumpIfNotInNewSpace(Register object,
396                            Register scratch,
397                            Label* branch) {
398     InNewSpace(object, scratch, eq, branch);
399   }
400 
401   // Check if object is in new space.  Jumps if the object is in new space.
402   // The register scratch can be object itself, but scratch will be clobbered.
403   void JumpIfInNewSpace(Register object,
404                         Register scratch,
405                         Label* branch) {
406     InNewSpace(object, scratch, ne, branch);
407   }
408 
409   // Check if an object has a given incremental marking color.
410   void HasColor(Register object,
411                 Register scratch0,
412                 Register scratch1,
413                 Label* has_color,
414                 int first_bit,
415                 int second_bit);
416 
417   void JumpIfBlack(Register object,
418                    Register scratch0,
419                    Register scratch1,
420                    Label* on_black);
421 
422   // Checks the color of an object.  If the object is white we jump to the
423   // incremental marker.
424   void JumpIfWhite(Register value, Register scratch1, Register scratch2,
425                    Register scratch3, Label* value_is_white);
426 
427   // Notify the garbage collector that we wrote a pointer into an object.
428   // |object| is the object being stored into, |value| is the object being
429   // stored.  value and scratch registers are clobbered by the operation.
430   // The offset is the offset from the start of the object, not the offset from
431   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
432   void RecordWriteField(
433       Register object,
434       int offset,
435       Register value,
436       Register scratch,
437       RAStatus ra_status,
438       SaveFPRegsMode save_fp,
439       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
440       SmiCheck smi_check = INLINE_SMI_CHECK,
441       PointersToHereCheck pointers_to_here_check_for_value =
442           kPointersToHereMaybeInteresting);
443 
444   // As above, but the offset has the tag presubtracted.  For use with
445   // MemOperand(reg, off).
446   inline void RecordWriteContextSlot(
447       Register context,
448       int offset,
449       Register value,
450       Register scratch,
451       RAStatus ra_status,
452       SaveFPRegsMode save_fp,
453       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
454       SmiCheck smi_check = INLINE_SMI_CHECK,
455       PointersToHereCheck pointers_to_here_check_for_value =
456           kPointersToHereMaybeInteresting) {
457     RecordWriteField(context,
458                      offset + kHeapObjectTag,
459                      value,
460                      scratch,
461                      ra_status,
462                      save_fp,
463                      remembered_set_action,
464                      smi_check,
465                      pointers_to_here_check_for_value);
466   }
467 
468   // Notify the garbage collector that we wrote a code entry into a
469   // JSFunction. Only scratch is clobbered by the operation.
470   void RecordWriteCodeEntryField(Register js_function, Register code_entry,
471                                  Register scratch);
472 
473   void RecordWriteForMap(
474       Register object,
475       Register map,
476       Register dst,
477       RAStatus ra_status,
478       SaveFPRegsMode save_fp);
479 
480   // For a given |object| notify the garbage collector that the slot |address|
481   // has been written.  |value| is the object being stored. The value and
482   // address registers are clobbered by the operation.
483   void RecordWrite(
484       Register object,
485       Register address,
486       Register value,
487       RAStatus ra_status,
488       SaveFPRegsMode save_fp,
489       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
490       SmiCheck smi_check = INLINE_SMI_CHECK,
491       PointersToHereCheck pointers_to_here_check_for_value =
492           kPointersToHereMaybeInteresting);
493 
494 
495   // ---------------------------------------------------------------------------
496   // Inline caching support.
497 
498   void GetNumberHash(Register reg0, Register scratch);
499 
500   inline void MarkCode(NopMarkerTypes type) {
501     nop(type);
502   }
503 
504   // Check if the given instruction is a 'type' marker.
505   // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
506   // nop(type)). These instructions are generated to mark special locations in
507   // the code, like some special IC code.
508   static inline bool IsMarkedCode(Instr instr, int type) {
509     DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
510     return IsNop(instr, type);
511   }
512 
513 
514   static inline int GetCodeMarker(Instr instr) {
515     uint32_t opcode = ((instr & kOpcodeMask));
516     uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
517     uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
518     uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
519 
520     // Return <n> if we have a sll zero_reg, zero_reg, n
521     // else return -1.
522     bool sllzz = (opcode == SLL &&
523                   rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
524                   rs == static_cast<uint32_t>(ToNumber(zero_reg)));
525     int type =
526         (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
527     DCHECK((type == -1) ||
528            ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
529     return type;
530   }
531 
532 
533 
534   // ---------------------------------------------------------------------------
535   // Allocation support.
536 
537   // Allocate an object in new space or old space. The object_size is
538   // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
539   // is passed. If the space is exhausted control continues at the gc_required
540   // label. The allocated object is returned in result. If the flag
541   // tag_allocated_object is true the result is tagged as a heap object.
542   // All registers are also clobbered when control continues at the gc_required
543   // label.
544   void Allocate(int object_size,
545                 Register result,
546                 Register scratch1,
547                 Register scratch2,
548                 Label* gc_required,
549                 AllocationFlags flags);
550 
551   void Allocate(Register object_size, Register result, Register result_new,
552                 Register scratch, Label* gc_required, AllocationFlags flags);
553 
554   // FastAllocate is right now only used for folded allocations. It just
555   // increments the top pointer without checking against limit. This can only
556   // be done if it was proved earlier that the allocation will succeed.
557   void FastAllocate(int object_size, Register result, Register scratch1,
558                     Register scratch2, AllocationFlags flags);
559 
560   void FastAllocate(Register object_size, Register result, Register result_new,
561                     Register scratch, AllocationFlags flags);
562 
563   void AllocateTwoByteString(Register result,
564                              Register length,
565                              Register scratch1,
566                              Register scratch2,
567                              Register scratch3,
568                              Label* gc_required);
569   void AllocateOneByteString(Register result, Register length,
570                              Register scratch1, Register scratch2,
571                              Register scratch3, Label* gc_required);
572   void AllocateTwoByteConsString(Register result,
573                                  Register length,
574                                  Register scratch1,
575                                  Register scratch2,
576                                  Label* gc_required);
577   void AllocateOneByteConsString(Register result, Register length,
578                                  Register scratch1, Register scratch2,
579                                  Label* gc_required);
580   void AllocateTwoByteSlicedString(Register result,
581                                    Register length,
582                                    Register scratch1,
583                                    Register scratch2,
584                                    Label* gc_required);
585   void AllocateOneByteSlicedString(Register result, Register length,
586                                    Register scratch1, Register scratch2,
587                                    Label* gc_required);
588 
589   // Allocates a heap number or jumps to the gc_required label if the young
590   // space is full and a scavenge is needed. All registers are also clobbered
591   // when control continues at the gc_required label.
592   void AllocateHeapNumber(Register result,
593                           Register scratch1,
594                           Register scratch2,
595                           Register heap_number_map,
596                           Label* gc_required,
597                           MutableMode mode = IMMUTABLE);
598   void AllocateHeapNumberWithValue(Register result,
599                                    FPURegister value,
600                                    Register scratch1,
601                                    Register scratch2,
602                                    Label* gc_required);
603 
604   // Allocate and initialize a JSValue wrapper with the specified {constructor}
605   // and {value}.
606   void AllocateJSValue(Register result, Register constructor, Register value,
607                        Register scratch1, Register scratch2,
608                        Label* gc_required);
609 
610   // ---------------------------------------------------------------------------
611   // Instruction macros.
612 
613 #define DEFINE_INSTRUCTION(instr)                                              \
614   void instr(Register rd, Register rs, const Operand& rt);                     \
615   void instr(Register rd, Register rs, Register rt) {                          \
616     instr(rd, rs, Operand(rt));                                                \
617   }                                                                            \
618   void instr(Register rs, Register rt, int32_t j) {                            \
619     instr(rs, rt, Operand(j));                                                 \
620   }
621 
622 #define DEFINE_INSTRUCTION2(instr)                                             \
623   void instr(Register rs, const Operand& rt);                                  \
624   void instr(Register rs, Register rt) {                                       \
625     instr(rs, Operand(rt));                                                    \
626   }                                                                            \
627   void instr(Register rs, int32_t j) {                                         \
628     instr(rs, Operand(j));                                                     \
629   }
630 
631 #define DEFINE_INSTRUCTION3(instr)                                             \
632   void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt);  \
633   void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) {       \
634     instr(rd_hi, rd_lo, rs, Operand(rt));                                      \
635   }                                                                            \
636   void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) {         \
637     instr(rd_hi, rd_lo, rs, Operand(j));                                       \
638   }
639 
640   DEFINE_INSTRUCTION(Addu);
641   DEFINE_INSTRUCTION(Subu);
642   DEFINE_INSTRUCTION(Mul);
643   DEFINE_INSTRUCTION(Div);
644   DEFINE_INSTRUCTION(Divu);
645   DEFINE_INSTRUCTION(Mod);
646   DEFINE_INSTRUCTION(Modu);
647   DEFINE_INSTRUCTION(Mulh);
648   DEFINE_INSTRUCTION2(Mult);
649   DEFINE_INSTRUCTION(Mulhu);
650   DEFINE_INSTRUCTION2(Multu);
651   DEFINE_INSTRUCTION2(Div);
652   DEFINE_INSTRUCTION2(Divu);
653 
654   DEFINE_INSTRUCTION3(Div);
655   DEFINE_INSTRUCTION3(Mul);
656   DEFINE_INSTRUCTION3(Mulu);
657 
658   DEFINE_INSTRUCTION(And);
659   DEFINE_INSTRUCTION(Or);
660   DEFINE_INSTRUCTION(Xor);
661   DEFINE_INSTRUCTION(Nor);
662   DEFINE_INSTRUCTION2(Neg);
663 
664   DEFINE_INSTRUCTION(Slt);
665   DEFINE_INSTRUCTION(Sltu);
666 
667   // MIPS32 R2 instruction macro.
668   DEFINE_INSTRUCTION(Ror);
669 
670 #undef DEFINE_INSTRUCTION
671 #undef DEFINE_INSTRUCTION2
672 #undef DEFINE_INSTRUCTION3
673 
674   // Load Scaled Address instructions. Parameter sa (shift argument) must be
675   // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
676   // may be clobbered.
677   void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
678            Register scratch = at);
679 
680   void Pref(int32_t hint, const MemOperand& rs);
681 
682 
683   // ---------------------------------------------------------------------------
684   // Pseudo-instructions.
685 
686   // Change endianness
687   void ByteSwapSigned(Register dest, Register src, int operand_size);
688   void ByteSwapUnsigned(Register dest, Register src, int operand_size);
689 
690   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
691 
692   void Ulh(Register rd, const MemOperand& rs);
693   void Ulhu(Register rd, const MemOperand& rs);
694   void Ush(Register rd, const MemOperand& rs, Register scratch);
695 
696   void Ulw(Register rd, const MemOperand& rs);
697   void Usw(Register rd, const MemOperand& rs);
698 
699   void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
700   void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
701 
702   void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
703   void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
704 
705   // Load int32 in the rd register.
706   void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
707   inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
708     li(rd, Operand(j), mode);
709   }
710   void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
711 
712   // Push multiple registers on the stack.
713   // Registers are saved in numerical order, with higher numbered registers
714   // saved in higher memory addresses.
715   void MultiPush(RegList regs);
716   void MultiPushReversed(RegList regs);
717 
718   void MultiPushFPU(RegList regs);
719   void MultiPushReversedFPU(RegList regs);
720 
721   void push(Register src) {
722     Addu(sp, sp, Operand(-kPointerSize));
723     sw(src, MemOperand(sp, 0));
724   }
725   void Push(Register src) { push(src); }
726 
727   // Push a handle.
728   void Push(Handle<Object> handle);
729   void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
730 
731   // Push two registers. Pushes leftmost register first (to highest address).
732   void Push(Register src1, Register src2) {
733     Subu(sp, sp, Operand(2 * kPointerSize));
734     sw(src1, MemOperand(sp, 1 * kPointerSize));
735     sw(src2, MemOperand(sp, 0 * kPointerSize));
736   }
737 
738   // Push three registers. Pushes leftmost register first (to highest address).
739   void Push(Register src1, Register src2, Register src3) {
740     Subu(sp, sp, Operand(3 * kPointerSize));
741     sw(src1, MemOperand(sp, 2 * kPointerSize));
742     sw(src2, MemOperand(sp, 1 * kPointerSize));
743     sw(src3, MemOperand(sp, 0 * kPointerSize));
744   }
745 
746   // Push four registers. Pushes leftmost register first (to highest address).
747   void Push(Register src1, Register src2, Register src3, Register src4) {
748     Subu(sp, sp, Operand(4 * kPointerSize));
749     sw(src1, MemOperand(sp, 3 * kPointerSize));
750     sw(src2, MemOperand(sp, 2 * kPointerSize));
751     sw(src3, MemOperand(sp, 1 * kPointerSize));
752     sw(src4, MemOperand(sp, 0 * kPointerSize));
753   }
754 
755   // Push five registers. Pushes leftmost register first (to highest address).
756   void Push(Register src1, Register src2, Register src3, Register src4,
757             Register src5) {
758     Subu(sp, sp, Operand(5 * kPointerSize));
759     sw(src1, MemOperand(sp, 4 * kPointerSize));
760     sw(src2, MemOperand(sp, 3 * kPointerSize));
761     sw(src3, MemOperand(sp, 2 * kPointerSize));
762     sw(src4, MemOperand(sp, 1 * kPointerSize));
763     sw(src5, MemOperand(sp, 0 * kPointerSize));
764   }
765 
766   void Push(Register src, Condition cond, Register tst1, Register tst2) {
767     // Since we don't have conditional execution we use a Branch.
768     Branch(3, cond, tst1, Operand(tst2));
769     Subu(sp, sp, Operand(kPointerSize));
770     sw(src, MemOperand(sp, 0));
771   }
772 
773   // Pops multiple values from the stack and loads them into the
774   // registers specified in regs. Pop order is the opposite of that in MultiPush.
775   void MultiPop(RegList regs);
776   void MultiPopReversed(RegList regs);
777 
778   void MultiPopFPU(RegList regs);
779   void MultiPopReversedFPU(RegList regs);
780 
781   void pop(Register dst) {
782     lw(dst, MemOperand(sp, 0));
783     Addu(sp, sp, Operand(kPointerSize));
784   }
785   void Pop(Register dst) { pop(dst); }
786 
787   // Pop two registers. Pops rightmost register first (from lower address).
788   void Pop(Register src1, Register src2) {
789     DCHECK(!src1.is(src2));
790     lw(src2, MemOperand(sp, 0 * kPointerSize));
791     lw(src1, MemOperand(sp, 1 * kPointerSize));
792     Addu(sp, sp, 2 * kPointerSize);
793   }
794 
795   // Pop three registers. Pops rightmost register first (from lower address).
796   void Pop(Register src1, Register src2, Register src3) {
797     lw(src3, MemOperand(sp, 0 * kPointerSize));
798     lw(src2, MemOperand(sp, 1 * kPointerSize));
799     lw(src1, MemOperand(sp, 2 * kPointerSize));
800     Addu(sp, sp, 3 * kPointerSize);
801   }
802 
803   void Pop(uint32_t count = 1) {
804     Addu(sp, sp, Operand(count * kPointerSize));
805   }
806 
807   // Push a fixed frame, consisting of ra, fp.
808   void PushCommonFrame(Register marker_reg = no_reg);
809 
810   // Push a standard frame, consisting of ra, fp, context and JS function.
811   void PushStandardFrame(Register function_reg);
812 
813   void PopCommonFrame(Register marker_reg = no_reg);
814 
815   // Push and pop the registers that can hold pointers, as defined by the
816   // RegList constant kSafepointSavedRegisters.
817   void PushSafepointRegisters();
818   void PopSafepointRegisters();
819   // Store value in register src in the safepoint stack slot for
820   // register dst.
821   void StoreToSafepointRegisterSlot(Register src, Register dst);
822   // Load the value of the src register from its safepoint stack slot
823   // into register dst.
824   void LoadFromSafepointRegisterSlot(Register dst, Register src);
825 
826   // MIPS32 R2 instruction macro.
827   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
828   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
829   void Seb(Register rd, Register rt);
830   void Seh(Register rd, Register rt);
831   void Neg_s(FPURegister fd, FPURegister fs);
832   void Neg_d(FPURegister fd, FPURegister fs);
833 
834   // MIPS32 R6 instruction macros.
835   void Bovc(Register rt, Register rs, Label* L);
836   void Bnvc(Register rt, Register rs, Label* L);
837 
838   // Int64Lowering instructions
839   void AddPair(Register dst_low, Register dst_high, Register left_low,
840                Register left_high, Register right_low, Register right_high);
841 
842   void SubPair(Register dst_low, Register dst_high, Register left_low,
843                Register left_high, Register right_low, Register right_high);
844 
845   void ShlPair(Register dst_low, Register dst_high, Register src_low,
846                Register src_high, Register shift);
847 
848   void ShlPair(Register dst_low, Register dst_high, Register src_low,
849                Register src_high, uint32_t shift);
850 
851   void ShrPair(Register dst_low, Register dst_high, Register src_low,
852                Register src_high, Register shift);
853 
854   void ShrPair(Register dst_low, Register dst_high, Register src_low,
855                Register src_high, uint32_t shift);
856 
857   void SarPair(Register dst_low, Register dst_high, Register src_low,
858                Register src_high, Register shift);
859 
860   void SarPair(Register dst_low, Register dst_high, Register src_low,
861                Register src_high, uint32_t shift);
862 
863   // ---------------------------------------------------------------------------
864   // FPU macros. These do not handle special cases like NaN or +- inf.
865 
866   // Convert unsigned word to double.
867   void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
868 
869   // Convert single to unsigned word.
870   void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
871   void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
872 
873   // Convert double to unsigned word.
874   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
875   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
876 
877   void Trunc_w_d(FPURegister fd, FPURegister fs);
878   void Round_w_d(FPURegister fd, FPURegister fs);
879   void Floor_w_d(FPURegister fd, FPURegister fs);
880   void Ceil_w_d(FPURegister fd, FPURegister fs);
881 
882   // FP32 mode: Move the general purpose register into
883   // the high part of the double-register pair.
884   // FP64 mode: Move the general-purpose register into
885   // the higher 32 bits of the 64-bit coprocessor register,
886   // while leaving the low bits unchanged.
887   void Mthc1(Register rt, FPURegister fs);
888 
889   // FP32 mode: move the high part of the double-register pair into
890   // general purpose register.
891   // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
892   // general-purpose register.
893   void Mfhc1(Register rt, FPURegister fs);
894 
895   // Wrapper functions for the different cmp/branch types.
896   inline void BranchF32(Label* target, Label* nan, Condition cc,
897                         FPURegister cmp1, FPURegister cmp2,
898                         BranchDelaySlot bd = PROTECT) {
899     BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
900   }
901 
902   inline void BranchF64(Label* target, Label* nan, Condition cc,
903                         FPURegister cmp1, FPURegister cmp2,
904                         BranchDelaySlot bd = PROTECT) {
905     BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
906   }
907 
908   // Alternate (inline) version for better readability with USE_DELAY_SLOT.
909   inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
910                         Condition cc, FPURegister cmp1, FPURegister cmp2) {
911     BranchF64(target, nan, cc, cmp1, cmp2, bd);
912   }
913 
914   inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
915                         Condition cc, FPURegister cmp1, FPURegister cmp2) {
916     BranchF32(target, nan, cc, cmp1, cmp2, bd);
917   }
918 
919   // Alias functions for backward compatibility.
920   inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
921                       FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
922     BranchF64(target, nan, cc, cmp1, cmp2, bd);
923   }
924 
925   inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
926                       Condition cc, FPURegister cmp1, FPURegister cmp2) {
927     BranchF64(bd, target, nan, cc, cmp1, cmp2);
928   }
929 
930   // Truncates a double using a specific rounding mode, and writes the value
931   // to the result register.
932   // The except_flag will contain any exceptions caused by the instruction.
933   // If check_inexact is kDontCheckForInexactConversion, then the inexact
934   // exception is masked.
935   void EmitFPUTruncate(FPURoundingMode rounding_mode,
936                        Register result,
937                        DoubleRegister double_input,
938                        Register scratch,
939                        DoubleRegister double_scratch,
940                        Register except_flag,
941                        CheckForInexactConversion check_inexact
942                            = kDontCheckForInexactConversion);
943 
944   // Performs a truncating conversion of a floating point number as used by
945   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
946   // succeeds, otherwise falls through if result is saturated. On return
947   // 'result' either holds answer, or is clobbered on fall through.
948   //
949   // Only public for the test code in test-code-stubs-arm.cc.
950   void TryInlineTruncateDoubleToI(Register result,
951                                   DoubleRegister input,
952                                   Label* done);
953 
954   // Performs a truncating conversion of a floating point number as used by
955   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
956   // Exits with 'result' holding the answer.
957   void TruncateDoubleToI(Register result, DoubleRegister double_input);
958 
959   // Performs a truncating conversion of a heap number as used by
960   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
961   // must be different registers. Exits with 'result' holding the answer.
962   void TruncateHeapNumberToI(Register result, Register object);
963 
964   // Converts the smi or heap number in object to an int32 using the rules
965   // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
966   // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
967   // different registers.
968   void TruncateNumberToI(Register object,
969                          Register result,
970                          Register heap_number_map,
971                          Register scratch,
972                          Label* not_int32);
973 
974   // Loads the number from object into dst register.
975   // If |object| is neither smi nor heap number, |not_number| is jumped to
976   // with |object| still intact.
977   void LoadNumber(Register object,
978                   FPURegister dst,
979                   Register heap_number_map,
980                   Register scratch,
981                   Label* not_number);
982 
983   // Loads the number from object into double_dst in the double format.
984   // Control will jump to not_int32 if the value cannot be exactly represented
985   // by a 32-bit integer.
986   // Floating point values in the 32-bit integer range that are not exact
987   // integers won't be loaded.
988   void LoadNumberAsInt32Double(Register object,
989                                DoubleRegister double_dst,
990                                Register heap_number_map,
991                                Register scratch1,
992                                Register scratch2,
993                                FPURegister double_scratch,
994                                Label* not_int32);
995 
996   // Loads the number from object into dst as a 32-bit integer.
997   // Control will jump to not_int32 if the object cannot be exactly represented
998   // by a 32-bit integer.
999   // Floating point values in the 32-bit integer range that are not exact
1000   // integers won't be converted.
1001   void LoadNumberAsInt32(Register object,
1002                          Register dst,
1003                          Register heap_number_map,
1004                          Register scratch1,
1005                          Register scratch2,
1006                          FPURegister double_scratch0,
1007                          FPURegister double_scratch1,
1008                          Label* not_int32);
1009 
1010   // Enter exit frame.
1011   // argc - argument count to be dropped by LeaveExitFrame.
1012   // save_doubles - saves FPU registers on stack, currently disabled.
1013   // stack_space - extra stack space.
1014   void EnterExitFrame(bool save_doubles, int stack_space = 0,
1015                       StackFrame::Type frame_type = StackFrame::EXIT);
1016 
1017   // Leave the current exit frame.
1018   void LeaveExitFrame(bool save_doubles, Register arg_count,
1019                       bool restore_context, bool do_return = NO_EMIT_RETURN,
1020                       bool argument_count_is_length = false);
1021 
1022   // Get the actual activation frame alignment for target environment.
1023   static int ActivationFrameAlignment();
1024 
1025   // Make sure the stack is aligned. Only emits code in debug mode.
1026   void AssertStackIsAligned();
1027 
1028   void LoadContext(Register dst, int context_chain_length);
1029 
1030   // Load the global object from the current context.
1031   void LoadGlobalObject(Register dst) {
1032     LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
1033   }
1034 
1035   // Load the global proxy from the current context.
1036   void LoadGlobalProxy(Register dst) {
1037     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
1038   }
1039 
1040   // Conditionally load the cached Array transitioned map of type
1041   // transitioned_kind from the native context if the map in register
1042   // map_in_out is the cached Array map in the native context of
1043   // expected_kind.
1044   void LoadTransitionedArrayMapConditional(
1045       ElementsKind expected_kind,
1046       ElementsKind transitioned_kind,
1047       Register map_in_out,
1048       Register scratch,
1049       Label* no_map_match);
1050 
1051   void LoadNativeContextSlot(int index, Register dst);
1052 
1053   // Load the initial map from the global function. The registers
1054   // function and map can be the same, function is then overwritten.
1055   void LoadGlobalFunctionInitialMap(Register function,
1056                                     Register map,
1057                                     Register scratch);
1058 
1059   void InitializeRootRegister() {
1060     ExternalReference roots_array_start =
1061         ExternalReference::roots_array_start(isolate());
1062     li(kRootRegister, Operand(roots_array_start));
1063   }
1064 
1065   // -------------------------------------------------------------------------
1066   // JavaScript invokes.
1067 
1068   // Removes current frame and its arguments from the stack preserving
1069   // the arguments and a return address pushed to the stack for the next call.
1070   // Both |callee_args_count| and |caller_args_count_reg| do not include
1071   // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
1072   // is trashed.
1073   void PrepareForTailCall(const ParameterCount& callee_args_count,
1074                           Register caller_args_count_reg, Register scratch0,
1075                           Register scratch1);
1076 
1077   // Invoke the JavaScript function code by either calling or jumping.
1078   void InvokeFunctionCode(Register function, Register new_target,
1079                           const ParameterCount& expected,
1080                           const ParameterCount& actual, InvokeFlag flag,
1081                           const CallWrapper& call_wrapper);
1082 
1083   void FloodFunctionIfStepping(Register fun, Register new_target,
1084                                const ParameterCount& expected,
1085                                const ParameterCount& actual);
1086 
1087   // Invoke the JavaScript function in the given register. Changes the
1088   // current context to the context in the function before invoking.
1089   void InvokeFunction(Register function,
1090                       Register new_target,
1091                       const ParameterCount& actual,
1092                       InvokeFlag flag,
1093                       const CallWrapper& call_wrapper);
1094 
1095   void InvokeFunction(Register function,
1096                       const ParameterCount& expected,
1097                       const ParameterCount& actual,
1098                       InvokeFlag flag,
1099                       const CallWrapper& call_wrapper);
1100 
1101   void InvokeFunction(Handle<JSFunction> function,
1102                       const ParameterCount& expected,
1103                       const ParameterCount& actual,
1104                       InvokeFlag flag,
1105                       const CallWrapper& call_wrapper);
1106 
1107   void IsObjectJSStringType(Register object,
1108                             Register scratch,
1109                             Label* fail);
1110 
1111   void IsObjectNameType(Register object,
1112                         Register scratch,
1113                         Label* fail);
1114 
1115   // -------------------------------------------------------------------------
1116   // Debugger Support.
1117 
1118   void DebugBreak();
1119 
1120   // -------------------------------------------------------------------------
1121   // Exception handling.
1122 
1123   // Push a new stack handler and link into stack handler chain.
1124   void PushStackHandler();
1125 
1126   // Unlink the stack handler on top of the stack from the stack handler chain.
1127   // Must preserve the result register.
1128   void PopStackHandler();
1129 
1130   // Initialize fields with filler values.  Fields starting at |current_address|
1131   // not including |end_address| are overwritten with the value in |filler|.  At
1132   // the end of the loop, |current_address| takes the value of |end_address|.
1133   void InitializeFieldsWithFiller(Register current_address,
1134                                   Register end_address, Register filler);
1135 
1136   // -------------------------------------------------------------------------
1137   // Support functions.
1138 
1139   // Machine code version of Map::GetConstructor().
1140   // |temp| holds |result|'s map when done, and |temp2| its instance type.
1141   void GetMapConstructor(Register result, Register map, Register temp,
1142                          Register temp2);
1143 
1144   // Tries to get the function prototype of a function and puts the value in
1145   // the result register. Checks that the function really is a
1146   // function and jumps to the miss label if the fast checks fail. The
1147   // function register will be untouched; the other registers may be
1148   // clobbered.
1149   void TryGetFunctionPrototype(Register function, Register result,
1150                                Register scratch, Label* miss);
1151 
1152   void GetObjectType(Register function,
1153                      Register map,
1154                      Register type_reg);
1155 
1156   void GetInstanceType(Register object_map, Register object_instance_type) {
1157     lbu(object_instance_type,
1158         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
1159   }
1160 
1161   // Check if a map for a JSObject indicates that the object can have both smi
1162   // and HeapObject elements.  Jump to the specified label if it does not.
1163   void CheckFastObjectElements(Register map,
1164                                Register scratch,
1165                                Label* fail);
1166 
1167   // Check if a map for a JSObject indicates that the object has fast smi only
1168   // elements.  Jump to the specified label if it does not.
1169   void CheckFastSmiElements(Register map,
1170                             Register scratch,
1171                             Label* fail);
1172 
1173   // Check to see if maybe_number can be stored as a double in
1174   // FastDoubleElements. If it can, store it at the index specified by key in
1175   // the FastDoubleElements array elements. Otherwise jump to fail.
1176   void StoreNumberToDoubleElements(Register value_reg,
1177                                    Register key_reg,
1178                                    Register elements_reg,
1179                                    Register scratch1,
1180                                    Register scratch2,
1181                                    Register scratch3,
1182                                    Label* fail,
1183                                    int elements_offset = 0);
1184 
1185   // Compare an object's map with the specified map and its transitioned
1186   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1187   // "branch_to" if the result of the comparison is "cond". If multiple map
1188   // compares are required, the compare sequence branches to early_success.
1189   void CompareMapAndBranch(Register obj,
1190                            Register scratch,
1191                            Handle<Map> map,
1192                            Label* early_success,
1193                            Condition cond,
1194                            Label* branch_to);
1195 
1196   // As above, but the map of the object is already loaded into the register
1197   // which is preserved by the code generated.
1198   void CompareMapAndBranch(Register obj_map,
1199                            Handle<Map> map,
1200                            Label* early_success,
1201                            Condition cond,
1202                            Label* branch_to);
1203 
1204   // Check if the map of an object is equal to a specified map and branch to
1205   // label if not. Skip the smi check if not required (object is known to be a
1206   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1207   // against maps that are ElementsKind transition maps of the specified map.
1208   void CheckMap(Register obj,
1209                 Register scratch,
1210                 Handle<Map> map,
1211                 Label* fail,
1212                 SmiCheckType smi_check_type);
1213 
1214 
1215   void CheckMap(Register obj,
1216                 Register scratch,
1217                 Heap::RootListIndex index,
1218                 Label* fail,
1219                 SmiCheckType smi_check_type);
1220 
1221   // Check if the map of an object is equal to a specified weak map and branch
1222   // to a specified target if equal. Skip the smi check if not required
1223   // (object is known to be a heap object)
1224   void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1225                        Handle<WeakCell> cell, Handle<Code> success,
1226                        SmiCheckType smi_check_type);
1227 
1228   // If the value is a NaN, canonicalize the value; otherwise, do nothing.
1229   void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
1230 
1231   // Get value of the weak cell.
1232   void GetWeakValue(Register value, Handle<WeakCell> cell);
1233 
1234   // Load the value of the weak cell in the value register. Branch to the
1235   // given miss label if the weak cell was cleared.
1236   void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
1237 
1238   // Load and check the instance type of an object for being a string.
1239   // Loads the type into the second argument register.
1240   // Returns a condition that will be enabled if the object was a string.
1241   Condition IsObjectStringType(Register obj,
1242                                Register type,
1243                                Register result) {
1244     lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1245     lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1246     And(type, type, Operand(kIsNotStringMask));
1247     DCHECK_EQ(0u, kStringTag);
1248     return eq;
1249   }
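
  // A minimal usage sketch (illustrative only; the registers and the target
  // label are assumptions, not part of this interface):
  //
  //   Label is_string;
  //   Condition cond = IsObjectStringType(a0, t0, t1);
  //   Branch(&is_string, cond, t0, Operand(zero_reg));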

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a number object into a FPU double register. If the
  // object is not a number, a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);

  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: call one of the combined arithmetic-and-branch functions below,
  // passing an overflow_label and/or a no_overflow_label to branch to.

  inline void AddBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void AddBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void AddBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);


  inline void SubBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void SubBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void SubBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  inline void MulBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void MulBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void MulBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

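  // A minimal usage sketch (illustrative only; the registers and the label
  // are assumptions, not part of this interface):
  //
  //   Label overflow;
  //   // t1 <- t2 + t3; branch to 'overflow' if the signed add wrapped.
  //   AddBranchOvf(t1, t2, Operand(t3), &overflow);
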
  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }

#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles, bd);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
  }
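
  // A minimal usage sketch (illustrative only; the choice of runtime function
  // id is an assumption, not part of this interface):
  //
  //   CallRuntime(Runtime::kStackGuard);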

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);


  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on the
  // stack and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack after the argument slots, using the helper CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored to the stack using the following:
  //  sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
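
  // A minimal calling sequence (illustrative only; 'ref' is assumed to be an
  // ExternalReference to a C function taking two word-sized arguments, and
  // the scratch register is an assumption):
  //
  //   PrepareCallCFunction(2, t0);
  //   li(a0, Operand(1));
  //   li(a1, Operand(2));
  //   CallCFunction(ref, 2);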
  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT,
                               bool builtin_exit_frame = false);

  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and at gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
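
  // A minimal usage sketch (illustrative only; the registers are assumptions,
  // not part of this interface):
  //
  //   // v0 = a0 / 3 (truncated); 'at' is clobbered, a0 is left unchanged.
  //   TruncatingDiv(v0, a0, 3);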

  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not,
  // control continues at the label not_power_of_two_or_zero. If reg is a
  // power of two, the register scratch contains the value of (reg - 1) when
  // control falls through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);

  // -------------------------------------------------------------------------
  // Smi utilities.

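  // On 32-bit MIPS a smi is the value shifted left by one bit
  // (kSmiTag == 0, kSmiTagSize == 1), so tagging is done by adding the value
  // to itself; SmiTagCheckOverflow additionally records whether the addition
  // overflowed the 31-bit smi range.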
  void SmiTag(Register reg) {
    Addu(reg, reg, reg);
  }

  void SmiTag(Register dst, Register src) { Addu(dst, src, src); }

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void BranchOnOverflow(Label* label, Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label, Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }


  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }
  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    SmiTagCheckOverflow(at, src, scratch);
    BranchOnOverflow(not_a_smi, scratch);
    mov(dst, at);
  }

  void SmiUntag(Register reg) {
    sra(reg, reg, kSmiTagSize);
  }

  void SmiUntag(Register dst, Register src) {
    sra(dst, src, kSmiTagSize);
  }

  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      sll(dst, src, kSmiTagSize - shift);
      And(dst, dst, Operand(mask));
    } else if (shift > kSmiTagSize) {
      srl(dst, src, shift - kSmiTagSize);
      And(dst, dst, Operand(mask));
    } else {
      And(dst, src, Operand(mask));
    }
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
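
  // A minimal usage sketch (illustrative only; the bit field and registers
  // are assumptions, not part of this interface):
  //
  //   // Extract the elements kind encoded in the map's bit field 2.
  //   DecodeField<Map::ElementsKindBits>(t0, t1);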

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue(bool code_pre_aging);

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in a0 and returns map with validated enum cache
  // in a0.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated AllocationMemento
  // object that can be checked for in order to pretransition to another type.
  // On entry, receiver_reg should point to the array object. scratch_reg gets
  // clobbered. If no info is present jump to no_memento_found, otherwise fall
  // through.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    Branch(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
  void BranchShortHelperR6(int32_t offset, Label* L);
  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
                           Register rs, const Operand& rt);
  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
                         const Operand& rt, BranchDelaySlot bdslot);
  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
                        const Operand& rt, BranchDelaySlot bdslot);

  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
  void BranchAndLinkShortHelper(int16_t offset, Label* L,
                                BranchDelaySlot bdslot);
  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
                                  Register rs, const Operand& rt);
  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt,
                                BranchDelaySlot bdslot);
  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                               Register rs, const Operand& rt,
                               BranchDelaySlot bdslot);
  void BranchLong(Label* L, BranchDelaySlot bdslot);
  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);

  // Common implementation of BranchF functions for the different formats.
  void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2,
                     BranchDelaySlot bd = PROTECT);

  void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
                    FPURegister cmp1, FPURegister cmp2,
                    BranchDelaySlot bd = PROTECT);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // ne for new space, eq otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit.  Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  bool has_double_zero_reg_set_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction, leaving the rest of the
  // instruction unchanged.
  void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Expected size of the patch, in bytes.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
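
// A minimal usage sketch (illustrative only; 'addr' and the replacement
// instruction are assumptions, not part of this interface):
//
//   {
//     CodePatcher patcher(isolate, addr, 1);  // Patch exactly one instruction.
//     patcher.masm()->nop();                  // Emit the replacement.
//   }  // Destructor checks the emitted size and flushes the i-cache.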

template <typename Func>
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                         Func GetLabelFunction) {
  if (kArchVariant >= kMips32r6) {
    BlockTrampolinePoolFor(case_count + 5);
    addiupc(at, 5);
    Lsa(at, at, index, kPointerSizeLog2);
    lw(at, MemOperand(at));
  } else {
    Label here;
    BlockTrampolinePoolFor(case_count + 10);
    push(ra);
    bal(&here);
    sll(at, index, kPointerSizeLog2);  // Branch delay slot.
    bind(&here);
    addu(at, at, ra);
    pop(ra);
    lw(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
  }
  jr(at);
  nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    dd(GetLabelFunction(index));
  }
}
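
// A minimal usage sketch (illustrative only; the labels, index register and
// lambda are assumptions about how the Func parameter is typically supplied):
//
//   Label case_0, case_1;
//   Label* labels[] = {&case_0, &case_1};
//   GenerateSwitchTable(a0, arraysize(labels),
//                       [&labels](size_t i) { return labels[i]; });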

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_