// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "assembler.h"
#include "mips/assembler-mips.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// Forward declaration.
class JumpTarget;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must be via the t9 register.


// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated object already tagged as a heap
  // object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1,
  // Specify that the requested size of the space to allocate is specified in
  // words instead of bytes.
  SIZE_IN_WORDS = 1 << 2
};
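
// Usage sketch (illustrative only; registers and labels are placeholders): the
// flags can be combined when calling the AllocateInNewSpace overloads declared
// further down in this file, for example:
//
//   AllocateInNewSpace(
//       object_size, result, scratch1, scratch2, &gc_required,
//       static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));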

// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};

// Allow the programmer to use the branch delay slot of branches, jumps, and
// calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};
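
// Usage sketch (illustrative only; the register choice is a placeholder): with
// USE_DELAY_SLOT the caller promises to emit the delay-slot instruction itself,
// immediately after the branch, jump, call, or return macro; with PROTECT the
// macro fills the slot with a nop.
//
//   Ret(USE_DELAY_SLOT);
//   mov(v0, a0);  // Executed in the delay slot of the return.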

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than a lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (a lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};
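
// Usage sketch (illustrative only; 'initial_value' is a placeholder):
// CONSTANT_SIZE forces the two-instruction lui/ori form so that the loaded
// value can be patched in place later.
//
//   li(a0, Operand(initial_value), CONSTANT_SIZE);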


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };

bool AreAliased(Register r1, Register r2, Register r3, Register r4);


// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_INDEX);
}


// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
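
// Usage sketch (illustrative only; 'object' and 'scratch' are placeholder
// registers): loading the map of a tagged heap object.
//
//   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));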


// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
  ASSERT(index > kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
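
// Usage sketch (illustrative only): after PrepareCallCFunction() (declared in
// MacroAssembler below) has set up the frame, arguments beyond the four
// register arguments are stored through this helper, e.g. the fifth argument:
//
//   sw(t0, CFunctionArgumentOperand(5));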


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such functions on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) { \
    Name(target, bd); \
  } \
  void Name(target_type target, \
            COND_TYPED_ARGS, \
            BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, \
                   target_type target, \
                   COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd); \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name) \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
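
  // Usage sketch (illustrative only; labels and registers are placeholders):
  // the generated prototypes take an optional condition triple, and the
  // branch-delay-slot policy may be given last or, via the inline overloads,
  // first.
  //
  //   Branch(&done);                               // Unconditional.
  //   Branch(&skip, eq, a0, Operand(zero_reg));    // Conditional.
  //   BranchAndLink(&subroutine, USE_DELAY_SLOT);  // Caller fills delay slot.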


  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Register target, COND_ARGS);
  void Call(Register target, COND_ARGS);
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      unsigned ast_id = kNoASTId,
                      COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            unsigned ast_id = kNoASTId,
            COND_ARGS);
  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }

  void Branch(Label* L,
              Condition cond,
              Register rs,
              Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);

#undef COND_ARGS

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);

  // Swap two registers.  If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void Call(Label* target);

  inline void Move(Register dst, Register src) {
    if (!dst.is(src)) {
      mov(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_d(dst, src);
    }
  }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
  }

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    mtc1(src_high, FPURegister::from_code(dst.code() + 1));
  }

  // Conditional move.
  void Move(FPURegister dst, double imm);
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void Clz(Register rd, Register rs);

  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Prefer b(Label) for code generation.
  void jmp(Label* L) {
    Branch(L);
  }

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      li(result, object);
    }
  }

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };


  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object.  If the object is already grey or black
  // then we just fall through, since it is already live.  If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through.  For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // As above, but the offset has the tag presubtracted.  For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check);
  }

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written.  |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
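
  // Usage sketch (illustrative only; all register names and 'save_fp' are
  // placeholders): a typical field store followed by its write barrier.
  //
  //   sw(value, FieldMemOperand(object, offset));
  //   RecordWriteField(object, offset, value, scratch,
  //                    kRAHasBeenSaved, save_fp);  // save_fp: SaveFPRegsMode.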


  // ---------------------------------------------------------------------------
  // Inline caching support.

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register reg0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register reg0,
                                Register reg1,
                                Register reg2);


  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations in
  // the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n
    // else return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    ASSERT((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }



  // ---------------------------------------------------------------------------
  // Allocation support.

  // Allocate an object in new space. The object_size is specified
  // either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the new space is exhausted control continues at the
  // gc_required label. The allocated object is returned in result. If
  // the TAG_OBJECT flag is passed the result is tagged as a heap
  // object. All registers are clobbered also when control
  // continues at the gc_required label.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          AllocationFlags flags);
  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);


  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register length,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateAsciiSlicedString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required);
  void AllocateHeapNumberWithValue(Register result,
                                   FPURegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);

  // ---------------------------------------------------------------------------
  // Instruction macros.

#define DEFINE_INSTRUCTION(instr)                                              \
  void instr(Register rd, Register rs, const Operand& rt);                     \
  void instr(Register rd, Register rs, Register rt) {                          \
    instr(rd, rs, Operand(rt));                                                \
  }                                                                            \
  void instr(Register rs, Register rt, int32_t j) {                            \
    instr(rs, rt, Operand(j));                                                 \
  }

#define DEFINE_INSTRUCTION2(instr)                                             \
  void instr(Register rs, const Operand& rt);                                  \
  void instr(Register rs, Register rt) {                                       \
    instr(rs, Operand(rt));                                                    \
  }                                                                            \
  void instr(Register rs, int32_t j) {                                         \
    instr(rs, Operand(j));                                                     \
  }

  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2


  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  // Load an int32 into the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  inline void li(Register dst, Handle<Object> value,
                 LiFlags mode = OPTIMIZE_SIZE) {
    li(dst, Operand(value), mode);
  }

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);

  void MultiPushFPU(RegList regs);
  void MultiPushReversedFPU(RegList regs);

  // Lower case push() for compatibility with arch-independent code.
  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  // Push a handle.
  void Push(Handle<Object> handle);

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  // Pops multiple values from the stack and loads them into the registers
  // specified in regs. Pop order is the opposite of MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);

  // Lower case pop() for compatibility with arch-independent code.
  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    ASSERT(!src1.is(src2));
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    lw(src3, MemOperand(sp, 0 * kPointerSize));
    lw(src2, MemOperand(sp, 1 * kPointerSize));
    lw(src1, MemOperand(sp, 2 * kPointerSize));
    Addu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) {
    Addu(sp, sp, Operand(count * kPointerSize));
  }
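
  // Usage sketch (illustrative only): the multi-register Push/Pop overloads
  // store the leftmost register at the highest address, so a matching Pop with
  // the same register order restores the original values.
  //
  //   Push(a0, a1, a2);
  //   Pop(a0, a1, a2);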

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
  // Does not handle errors.
  void FlushICache(Register address, unsigned instructions);

  // MIPS32 R2 instruction macros.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);
  // Wrapper function for the different cmp/branch types.
  void BranchF(Label* target,
               Label* nan,
               Condition cc,
               FPURegister cmp1,
               FPURegister cmp2,
               BranchDelaySlot bd = PROTECT);

  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF(BranchDelaySlot bd,
                      Label* target,
                      Label* nan,
                      Condition cc,
                      FPURegister cmp1,
                      FPURegister cmp2) {
    BranchF(target, nan, cc, cmp1, cmp2, bd);
  }
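
  // Usage sketch (illustrative only; labels and FPU registers are
  // placeholders): branch when cmp1 < cmp2, diverting to the 'nan' label if
  // either operand is NaN.
  //
  //   BranchF(&less_than, &is_nan, lt, f0, f2);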

  // Convert the HeapNumber pointed to by source to a 32-bit signed integer in
  // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
  // to the not_int32 label. If FPU is available double_scratch is used but not
  // scratch2.
  void ConvertToInt32(Register source,
                      Register dest,
                      Register scratch,
                      Register scratch2,
                      FPURegister double_scratch,
                      Label *not_int32);

  // Truncates a double using a specific rounding mode.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       FPURegister result,
                       DoubleRegister double_input,
                       Register scratch1,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);

  // Helper for EmitECMATruncate.
  // This will truncate a floating-point value outside of the signed 32-bit
  // integer range to a 32-bit signed integer.
  // Expects the double value loaded in input_high and input_low.
  // Exits with the answer in 'result'.
  // Note that this code does not work for values in the 32-bit range!
  void EmitOutOfInt32RangeTruncate(Register result,
                                   Register input_high,
                                   Register input_low,
                                   Register scratch);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer and all other registers clobbered.
  void EmitECMATruncate(Register result,
                        FPURegister double_input,
                        FPURegister single_scratch,
                        Register scratch,
                        Register scratch2,
                        Register scratch3);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles,
                      int stack_space = 0);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles,
                      Register arg_count,
                      bool do_return = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the global context if the map in register
  // map_in_out is the cached Array map in the global context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the initial map for new Arrays from a JSFunction.
  void LoadInitialArrayMap(Register function_in,
                           Register scratch,
                           Register map_out);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Set up call kind marking in t1. The method takes t1 as an
  // explicit first parameter to make the code more readable at the
  // call sites.
  void SetCallKind(Register dst, CallKind kind);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  CallKind call_kind);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);


  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  void IsInstanceJSObjectType(Register map,
                              Register scratch,
                              Label* fail);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();
#endif


  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new try handler and link into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // Passes thrown value to the handler of top of the try handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain.
  void ThrowUncatchable(Register value);

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values.  Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|.  At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // -------------------------------------------------------------------------
  // Support functions.

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiOnlyElements(Register map,
                                Register scratch,
                                Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail, in which
  // case scratch2, scratch3 and scratch4 are unmodified.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register receiver_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   Register scratch4,
                                   Label* fail);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequences branch to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to,
                           CompareMapMode mode = REQUIRE_EXACT_MAP);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type,
                CompareMapMode mode = REQUIRE_EXACT_MAP);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register scratch,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);


  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    ASSERT_EQ(0, kStringTag);
    return eq;
  }


  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a number object into a FPU double register. If the
  // object is not a number a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);

  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of the
  // jump functions with the overflow_dst register as the second parameter.

  void AdduAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
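
  // Usage sketch (illustrative only; register names are placeholders): the
  // arithmetic helper leaves a negative value in the overflow register exactly
  // when the operation overflowed, which the branch helpers test against zero.
  //
  //   AdduAndCheckForOverflow(result, left, right, overflow);
  //   BranchOnOverflow(&bailout, overflow);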

  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) {
    li(s0, num_args);
    li(s1, (num_args - 1) * kPointerSize);
  }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(s2, Operand(ref));
  }

  // Call a code stub.
  void CallStub(CodeStub* stub,
                Condition cond = cc_always,
                Register r1 = zero_reg,
                const Operand& r2 = Operand(zero_reg),
                BranchDelaySlot bd = PROTECT);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  void CallJSExitStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on the
  // stack and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument slots, using the helper CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 through a3 respectively.
  // Arguments 5..n are stored on the stack using the following:
  //  sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void GetCFunctionDoubleResult(const DoubleRegister dst);
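
  // Usage sketch (illustrative only; 'function_ref' is an ExternalReference
  // and the argument values are placeholders): a simple two-argument C call.
  //
  //   PrepareCallCFunction(2, scratch);
  //   li(a0, Operand(first_arg));
  //   li(a1, Operand(second_arg));
  //   CallCFunction(function_ref, 2);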
1191 
1192   // There are two ways of passing double arguments on MIPS, depending on
1193   // whether soft or hard floating point ABI is used. These functions
1194   // abstract parameter passing for the three different ways we call
1195   // C functions from generated code.
1196   void SetCallCDoubleArguments(DoubleRegister dreg);
1197   void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
1198   void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
1199 
1200   // Calls an API function.  Allocates HandleScope, extracts returned value
1201   // from handle and propagates exceptions.  Restores context.  stack_space
1202   // - space to be unwound on exit (includes the call JS arguments space and
1203   // the additional space allocated for the fast call).
1204   void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
1205 
1206   // Jump to the builtin routine.
1207   void JumpToExternalReference(const ExternalReference& builtin,
1208                                BranchDelaySlot bd = PROTECT);
1209 
1210   // Invoke specified builtin JavaScript function. Adds an entry to
1211   // the unresolved list if the name does not resolve.
1212   void InvokeBuiltin(Builtins::JavaScript id,
1213                      InvokeFlag flag,
1214                      const CallWrapper& call_wrapper = NullCallWrapper());
1215 
1216   // Store the code object for the given builtin in the target register and
1217   // setup the function in a1.
1218   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1219 
1220   // Store the function for the given builtin in the target register.
1221   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1222 
1223   struct Unresolved {
1224     int pc;
1225     uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
1226     const char* name;
1227   };
1228 
CodeObject()1229   Handle<Object> CodeObject() {
1230     ASSERT(!code_object_.is_null());
1231     return code_object_;
1232   }
1233 
1234   // -------------------------------------------------------------------------
1235   // StatsCounter support.
1236 
1237   void SetCounter(StatsCounter* counter, int value,
1238                   Register scratch1, Register scratch2);
1239   void IncrementCounter(StatsCounter* counter, int value,
1240                         Register scratch1, Register scratch2);
1241   void DecrementCounter(StatsCounter* counter, int value,
1242                         Register scratch1, Register scratch2);
1243 
1244 
1245   // -------------------------------------------------------------------------
1246   // Debugging.
1247 
1248   // Calls Abort(msg) if the condition cc is not satisfied.
1249   // Use --debug_code to enable.
1250   void Assert(Condition cc, const char* msg, Register rs, Operand rt);
1251   void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
1252   void AssertFastElements(Register elements);
1253 
1254   // Like Assert(), but always enabled.
1255   void Check(Condition cc, const char* msg, Register rs, Operand rt);
1256 
1257   // Print a message to stdout and abort execution.
1258   void Abort(const char* msg);
1259 
1260   // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not,
  // control continues at the label not_power_of_two_or_zero. If reg is a
  // power of two, the scratch register contains the value of (reg - 1) when
  // control falls through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);

  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register reg) {
    Addu(reg, reg, reg);
  }

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void SmiTag(Register dst, Register src) {
    Addu(dst, src, src);
  }

  void SmiUntag(Register reg) {
    sra(reg, reg, kSmiTagSize);
  }

  void SmiUntag(Register dst, Register src) {
    sra(dst, src, kSmiTagSize);
  }
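  //
  // Illustrative note (not part of this interface): with kSmiTag == 0 and
  // kSmiTagSize == 1, a smi is the integer value shifted left by one bit, so
  // tagging doubles the value and untagging halves it:
  //
  //   li(t0, Operand(5));
  //   SmiTag(t0);    // t0 == 10, i.e. the smi representation of 5.
  //   SmiUntag(t0);  // t0 == 5 again.
  //
  // Addu(reg, reg, reg) is used for tagging because adding a register to
  // itself is equivalent to a left shift by one, which also sets up the
  // overflow check performed by SmiTagCheckOverflow.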

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi. Used in debug code.
  void AbortIfSmi(Register object);
  void AbortIfNotSmi(Register object);

  // Abort execution if argument is not a string. Used in debug code.
  void AbortIfNotString(Register object);

  // Abort execution if argument is not the root value with the given index.
  void AbortIfNotRootValue(Register src,
                           Heap::RootListIndex root_value_index,
                           const char* message);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Checks if both instance types are sequential ASCII strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);

  // Check if instance type is a sequential ASCII string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                              Register scratch,
                                              Label* failure);

  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
                                                  Register second,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* failure);

  // Test that both first and second are sequential ASCII strings.
  // Check that they are non-smis.
  void JumpIfNotBothSequentialAsciiStrings(Register first,
                                           Register second,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* failure);

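  // Clamp the value in input_reg to the unsigned byte range [0, 255] and
  // place the result in output_reg. ClampDoubleToUint8 does the same for a
  // double value, rounding it to an integer and using temp_double_reg as a
  // scratch register.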
  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


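  // Load the instance descriptors from the map in 'map' into 'descriptors'.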
  void LoadInstanceDescriptors(Register map, Register descriptors);


  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Patch the relocated value (lui/ori pair).
  void PatchRelocatedValue(Register li_location,
                           Register scratch,
                           Register new_value);
  // Get the relocated value (loaded data) from the lui/ori pair.
  void GetRelocatedValue(Register li_location,
                         Register value,
                         Register scratch);

  // Expects object in a0 and returns map with validated enum cache
  // in a0.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchShort(int16_t offset, Condition cond, Register rs,
                   const Operand& rt,
                   BranchDelaySlot bdslot = PROTECT);
  void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  void BranchShort(Label* L, Condition cond, Register rs,
                   const Operand& rt,
                   BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void J(Label* L, BranchDelaySlot bdslot);
  void Jr(Label* L, BranchDelaySlot bdslot);
  void Jalr(Label* L, BranchDelaySlot bdslot);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  // Get the code for the given builtin. Reports in the 'resolved' flag
  // whether the function could be resolved.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit.  Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions.  Compute a handler address and jump to
  // it.  See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool allow_stub_calls_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for optimized frame
  // traversal.
  friend class OptimizedFrame;
};


// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int instructions);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction, leaving the rest of the
  // instruction unchanged.
  void ChangeBranchCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int instructions_;  // Number of instructions of the expected patch size.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
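
// Illustrative sketch (not part of this interface): patching a single
// instruction in place might look like the following, where 'branch_address'
// is a hypothetical pointer to the instruction being rewritten:
//
//   CodePatcher patcher(branch_address, 1);
//   patcher.ChangeBranchCondition(ne);
//
// The destructor checks that exactly the promised number of instructions was
// emitted, so a patch that emits more or fewer bytes than specified fails an
// assertion.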


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

} }  // namespace v8::internal

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_