1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h>  // For LONG_MIN, LONG_MAX
29 
30 #include "v8.h"
31 
32 #if defined(V8_TARGET_ARCH_MIPS)
33 
34 #include "bootstrapper.h"
35 #include "codegen-inl.h"
36 #include "debug.h"
37 #include "runtime.h"
38 
39 namespace v8 {
40 namespace internal {
41 
42 MacroAssembler::MacroAssembler(void* buffer, int size)
43     : Assembler(buffer, size),
44       generating_stub_(false),
45       allow_stub_calls_(true),
46       code_object_(HEAP->undefined_value()) {
47 }
48 
49 
50 // Arguments macros
51 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
52 #define COND_ARGS cond, r1, r2
53 
54 #define REGISTER_TARGET_BODY(Name) \
55 void MacroAssembler::Name(Register target, \
56                           BranchDelaySlot bd) { \
57   Name(Operand(target), bd); \
58 } \
59 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
60                           BranchDelaySlot bd) { \
61   Name(Operand(target), COND_ARGS, bd); \
62 }
63 
64 
65 #define INT_PTR_TARGET_BODY(Name) \
66 void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
67                           BranchDelaySlot bd) { \
68   Name(Operand(target, rmode), bd); \
69 } \
70 void MacroAssembler::Name(intptr_t target, \
71                           RelocInfo::Mode rmode, \
72                           COND_TYPED_ARGS, \
73                           BranchDelaySlot bd) { \
74   Name(Operand(target, rmode), COND_ARGS, bd); \
75 }
76 
77 
78 #define BYTE_PTR_TARGET_BODY(Name) \
79 void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
80                           BranchDelaySlot bd) { \
81   Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
82 } \
83 void MacroAssembler::Name(byte* target, \
84                           RelocInfo::Mode rmode, \
85                           COND_TYPED_ARGS, \
86                           BranchDelaySlot bd) { \
87   Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
88 }
89 
90 
91 #define CODE_TARGET_BODY(Name) \
92 void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
93                           BranchDelaySlot bd) { \
94   Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
95 } \
96 void MacroAssembler::Name(Handle<Code> target, \
97                           RelocInfo::Mode rmode, \
98                           COND_TYPED_ARGS, \
99                           BranchDelaySlot bd) { \
100   Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
101 }
102 
103 
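// Instantiate the Jump and Call overloads for register, intptr_t, byte* and Handle<Code> targets.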
104 REGISTER_TARGET_BODY(Jump)
105 REGISTER_TARGET_BODY(Call)
106 INT_PTR_TARGET_BODY(Jump)
107 INT_PTR_TARGET_BODY(Call)
108 BYTE_PTR_TARGET_BODY(Jump)
109 BYTE_PTR_TARGET_BODY(Call)
110 CODE_TARGET_BODY(Jump)
111 CODE_TARGET_BODY(Call)
112 
113 #undef COND_TYPED_ARGS
114 #undef COND_ARGS
115 #undef REGISTER_TARGET_BODY
116 #undef BYTE_PTR_TARGET_BODY
117 #undef CODE_TARGET_BODY
118 
119 
120 void MacroAssembler::Ret(BranchDelaySlot bd) {
121   Jump(Operand(ra), bd);
122 }
123 
124 
125 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
126     BranchDelaySlot bd) {
127   Jump(Operand(ra), cond, r1, r2, bd);
128 }
129 
130 
131 void MacroAssembler::LoadRoot(Register destination,
132                               Heap::RootListIndex index) {
133   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
134 }
135 
136 
137 void MacroAssembler::LoadRoot(Register destination,
138                               Heap::RootListIndex index,
139                               Condition cond,
140                               Register src1, const Operand& src2) {
141   Branch(2, NegateCondition(cond), src1, src2);
142   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
143 }
144 
145 
146 void MacroAssembler::StoreRoot(Register source,
147                                Heap::RootListIndex index) {
148   sw(source, MemOperand(s6, index << kPointerSizeLog2));
149 }
150 
151 
152 void MacroAssembler::StoreRoot(Register source,
153                                Heap::RootListIndex index,
154                                Condition cond,
155                                Register src1, const Operand& src2) {
156   Branch(2, NegateCondition(cond), src1, src2);
157   sw(source, MemOperand(s6, index << kPointerSizeLog2));
158 }
159 
160 
161 void MacroAssembler::RecordWriteHelper(Register object,
162                                        Register address,
163                                        Register scratch) {
164   if (FLAG_debug_code) {
165     // Check that the object is not in new space.
166     Label not_in_new_space;
167     InNewSpace(object, scratch, ne, &not_in_new_space);
168     Abort("new-space object passed to RecordWriteHelper");
169     bind(&not_in_new_space);
170   }
171 
172   // Calculate page address: Clear bits from 0 to kPageSizeBits.
173   if (mips32r2) {
174     Ins(object, zero_reg, 0, kPageSizeBits);
175   } else {
176     // The Ins macro is slow on r1, so use shifts instead.
177     srl(object, object, kPageSizeBits);
178     sll(object, object, kPageSizeBits);
179   }
180 
181   // Calculate region number.
182   Ext(address, address, Page::kRegionSizeLog2,
183       kPageSizeBits - Page::kRegionSizeLog2);
184 
185   // Mark region dirty.
186   lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
187   li(at, Operand(1));
188   sllv(at, at, address);
189   or_(scratch, scratch, at);
190   sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
191 }
192 
193 
194 void MacroAssembler::InNewSpace(Register object,
195                                 Register scratch,
196                                 Condition cc,
197                                 Label* branch) {
198   ASSERT(cc == eq || cc == ne);
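  // An object is in new space when (object & new_space_mask) == new_space_start.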
199   And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
200   Branch(branch, cc, scratch,
201          Operand(ExternalReference::new_space_start(isolate())));
202 }
203 
204 
205 // Will clobber 4 registers: object, scratch0, scratch1, at. The
206 // register 'object' contains a heap object pointer.  The heap object
207 // tag is shifted away.
208 void MacroAssembler::RecordWrite(Register object,
209                                  Operand offset,
210                                  Register scratch0,
211                                  Register scratch1) {
212   // The compiled code assumes that record write doesn't change the
213   // context register, so we check that none of the clobbered
214   // registers are cp.
215   ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
216 
217   Label done;
218 
219   // First, test that the object is not in the new space.  We cannot set
220   // region marks for new space pages.
221   InNewSpace(object, scratch0, eq, &done);
222 
223   // Add offset into the object.
224   Addu(scratch0, object, offset);
225 
226   // Record the actual write.
227   RecordWriteHelper(object, scratch0, scratch1);
228 
229   bind(&done);
230 
231   // Clobber all input registers when running with the debug-code flag
232   // turned on to provoke errors.
233   if (FLAG_debug_code) {
234     li(object, Operand(BitCast<int32_t>(kZapValue)));
235     li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
236     li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
237   }
238 }
239 
240 
241 // Will clobber 4 registers: object, address, scratch, ip.  The
242 // register 'object' contains a heap object pointer.  The heap object
243 // tag is shifted away.
244 void MacroAssembler::RecordWrite(Register object,
245                                  Register address,
246                                  Register scratch) {
247   // The compiled code assumes that record write doesn't change the
248   // context register, so we check that none of the clobbered
249   // registers are cp.
250   ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
251 
252   Label done;
253 
254   // First, test that the object is not in the new space.  We cannot set
255   // region marks for new space pages.
256   InNewSpace(object, scratch, eq, &done);
257 
258   // Record the actual write.
259   RecordWriteHelper(object, address, scratch);
260 
261   bind(&done);
262 
263   // Clobber all input registers when running with the debug-code flag
264   // turned on to provoke errors.
265   if (FLAG_debug_code) {
266     li(object, Operand(BitCast<int32_t>(kZapValue)));
267     li(address, Operand(BitCast<int32_t>(kZapValue)));
268     li(scratch, Operand(BitCast<int32_t>(kZapValue)));
269   }
270 }
271 
272 
273 // -----------------------------------------------------------------------------
274 // Allocation support
275 
276 
277 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
278                                             Register scratch,
279                                             Label* miss) {
280   Label same_contexts;
281 
282   ASSERT(!holder_reg.is(scratch));
283   ASSERT(!holder_reg.is(at));
284   ASSERT(!scratch.is(at));
285 
286   // Load current lexical context from the stack frame.
287   lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
288   // In debug mode, make sure the lexical context is set.
289 #ifdef DEBUG
290   Check(ne, "we should not have an empty lexical context",
291       scratch, Operand(zero_reg));
292 #endif
293 
294   // Load the global context of the current context.
295   int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
296   lw(scratch, FieldMemOperand(scratch, offset));
297   lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
298 
299   // Check the context is a global context.
300   if (FLAG_debug_code) {
301     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
302     Push(holder_reg);  // Temporarily save holder on the stack.
303     // Read the first word and compare to the global_context_map.
304     lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
305     LoadRoot(at, Heap::kGlobalContextMapRootIndex);
306     Check(eq, "JSGlobalObject::global_context should be a global context.",
307           holder_reg, Operand(at));
308     Pop(holder_reg);  // Restore holder.
309   }
310 
311   // Check if both contexts are the same.
312   lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
313   Branch(&same_contexts, eq, scratch, Operand(at));
314 
315   // Check the context is a global context.
316   if (FLAG_debug_code) {
317     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
318     Push(holder_reg);  // Temporarily save holder on the stack.
319     mov(holder_reg, at);  // Move at to its holding place.
320     LoadRoot(at, Heap::kNullValueRootIndex);
321     Check(ne, "JSGlobalProxy::context() should not be null.",
322           holder_reg, Operand(at));
323 
324     lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
325     LoadRoot(at, Heap::kGlobalContextMapRootIndex);
326     Check(eq, "JSGlobalObject::global_context should be a global context.",
327           holder_reg, Operand(at));
328     // Restoring at is not needed; at is reloaded below.
329     Pop(holder_reg);  // Restore holder.
330     // Restore at to holder's context.
331     lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
332   }
333 
334   // Check that the security token in the calling global object is
335   // compatible with the security token in the receiving global
336   // object.
337   int token_offset = Context::kHeaderSize +
338                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
339 
340   lw(scratch, FieldMemOperand(scratch, token_offset));
341   lw(at, FieldMemOperand(at, token_offset));
342   Branch(miss, ne, scratch, Operand(at));
343 
344   bind(&same_contexts);
345 }
346 
347 
348 // ---------------------------------------------------------------------------
349 // Instruction macros
350 
351 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
352   if (rt.is_reg()) {
353     addu(rd, rs, rt.rm());
354   } else {
355     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
356       addiu(rd, rs, rt.imm32_);
357     } else {
358       // li handles the relocation.
359       ASSERT(!rs.is(at));
360       li(at, rt);
361       addu(rd, rs, at);
362     }
363   }
364 }
365 
366 
367 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
368   if (rt.is_reg()) {
369     subu(rd, rs, rt.rm());
370   } else {
371     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
372       addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
373     } else {
374       // li handles the relocation.
375       ASSERT(!rs.is(at));
376       li(at, rt);
377       subu(rd, rs, at);
378     }
379   }
380 }
381 
382 
383 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
384   if (rt.is_reg()) {
385     mul(rd, rs, rt.rm());
386   } else {
387     // li handles the relocation.
388     ASSERT(!rs.is(at));
389     li(at, rt);
390     mul(rd, rs, at);
391   }
392 }
393 
394 
395 void MacroAssembler::Mult(Register rs, const Operand& rt) {
396   if (rt.is_reg()) {
397     mult(rs, rt.rm());
398   } else {
399     // li handles the relocation.
400     ASSERT(!rs.is(at));
401     li(at, rt);
402     mult(rs, at);
403   }
404 }
405 
406 
407 void MacroAssembler::Multu(Register rs, const Operand& rt) {
408   if (rt.is_reg()) {
409     multu(rs, rt.rm());
410   } else {
411     // li handles the relocation.
412     ASSERT(!rs.is(at));
413     li(at, rt);
414     multu(rs, at);
415   }
416 }
417 
418 
419 void MacroAssembler::Div(Register rs, const Operand& rt) {
420   if (rt.is_reg()) {
421     div(rs, rt.rm());
422   } else {
423     // li handles the relocation.
424     ASSERT(!rs.is(at));
425     li(at, rt);
426     div(rs, at);
427   }
428 }
429 
430 
431 void MacroAssembler::Divu(Register rs, const Operand& rt) {
432   if (rt.is_reg()) {
433     divu(rs, rt.rm());
434   } else {
435     // li handles the relocation.
436     ASSERT(!rs.is(at));
437     li(at, rt);
438     divu(rs, at);
439   }
440 }
441 
442 
443 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
444   if (rt.is_reg()) {
445     and_(rd, rs, rt.rm());
446   } else {
447     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
448       andi(rd, rs, rt.imm32_);
449     } else {
450       // li handles the relocation.
451       ASSERT(!rs.is(at));
452       li(at, rt);
453       and_(rd, rs, at);
454     }
455   }
456 }
457 
458 
459 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
460   if (rt.is_reg()) {
461     or_(rd, rs, rt.rm());
462   } else {
463     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
464       ori(rd, rs, rt.imm32_);
465     } else {
466       // li handles the relocation.
467       ASSERT(!rs.is(at));
468       li(at, rt);
469       or_(rd, rs, at);
470     }
471   }
472 }
473 
474 
475 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
476   if (rt.is_reg()) {
477     xor_(rd, rs, rt.rm());
478   } else {
479     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
480       xori(rd, rs, rt.imm32_);
481     } else {
482       // li handles the relocation.
483       ASSERT(!rs.is(at));
484       li(at, rt);
485       xor_(rd, rs, at);
486     }
487   }
488 }
489 
490 
491 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
492   if (rt.is_reg()) {
493     nor(rd, rs, rt.rm());
494   } else {
495     // li handles the relocation.
496     ASSERT(!rs.is(at));
497     li(at, rt);
498     nor(rd, rs, at);
499   }
500 }
501 
502 
503 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
504   if (rt.is_reg()) {
505     slt(rd, rs, rt.rm());
506   } else {
507     if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
508       slti(rd, rs, rt.imm32_);
509     } else {
510       // li handles the relocation.
511       ASSERT(!rs.is(at));
512       li(at, rt);
513       slt(rd, rs, at);
514     }
515   }
516 }
517 
518 
519 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
520   if (rt.is_reg()) {
521     sltu(rd, rs, rt.rm());
522   } else {
523     if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
524       sltiu(rd, rs, rt.imm32_);
525     } else {
526       // li handles the relocation.
527       ASSERT(!rs.is(at));
528       li(at, rt);
529       sltu(rd, rs, at);
530     }
531   }
532 }
533 
534 
535 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
536   if (mips32r2) {
537     if (rt.is_reg()) {
538       rotrv(rd, rs, rt.rm());
539     } else {
540       rotr(rd, rs, rt.imm32_);
541     }
542   } else {
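    // Pre-R2 cores have no rotate instructions: synthesize rd = (rs >> sa) | (rs << (32 - sa)).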
543     if (rt.is_reg()) {
544       subu(at, zero_reg, rt.rm());
545       sllv(at, rs, at);
546       srlv(rd, rs, rt.rm());
547       or_(rd, rd, at);
548     } else {
549       if (rt.imm32_ == 0) {
550         srl(rd, rs, 0);
551       } else {
552         srl(at, rs, rt.imm32_);
553         sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
554         or_(rd, rd, at);
555       }
556     }
557   }
558 }
559 
560 
561 //------------Pseudo-instructions-------------
562 
563 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
564   ASSERT(!j.is_reg());
565   BlockTrampolinePoolScope block_trampoline_pool(this);
566   if (!MustUseReg(j.rmode_) && !gen2instr) {
567     // Normal load of an immediate value which does not need Relocation Info.
568     if (is_int16(j.imm32_)) {
569       addiu(rd, zero_reg, j.imm32_);
570     } else if (!(j.imm32_ & kHiMask)) {
571       ori(rd, zero_reg, j.imm32_);
572     } else if (!(j.imm32_ & kImm16Mask)) {
573       lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
574     } else {
575       lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
576       ori(rd, rd, (j.imm32_ & kImm16Mask));
577     }
578   } else if (MustUseReg(j.rmode_) || gen2instr) {
579     if (MustUseReg(j.rmode_)) {
580       RecordRelocInfo(j.rmode_, j.imm32_);
581     }
583   // We always need the same number of instructions, as we may need to patch
584   // this code to load another value, which may need 2 instructions to load.
584     if (is_int16(j.imm32_)) {
585       nop();
586       addiu(rd, zero_reg, j.imm32_);
587     } else if (!(j.imm32_ & kHiMask)) {
588       nop();
589       ori(rd, zero_reg, j.imm32_);
590     } else if (!(j.imm32_ & kImm16Mask)) {
591       nop();
592       lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
593     } else {
594       lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
595       ori(rd, rd, (j.imm32_ & kImm16Mask));
596     }
597   }
598 }
599 
600 
601 // Exception-generating instructions and debugging support
602 void MacroAssembler::stop(const char* msg) {
603   // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
604   // We use the 0x54321 value to be able to find it easily when reading memory.
605   break_(0x54321);
606 }
607 
608 
609 void MacroAssembler::MultiPush(RegList regs) {
610   int16_t NumSaved = 0;
611   int16_t NumToPush = NumberOfBitsSet(regs);
612 
613   addiu(sp, sp, -4 * NumToPush);
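  // Store from the highest-numbered register down, so the lowest-numbered
  // register ends up at the lowest address (sp).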
614   for (int16_t i = kNumRegisters; i > 0; i--) {
615     if ((regs & (1 << i)) != 0) {
616       sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
617     }
618   }
619 }
620 
621 
622 void MacroAssembler::MultiPushReversed(RegList regs) {
623   int16_t NumSaved = 0;
624   int16_t NumToPush = NumberOfBitsSet(regs);
625 
626   addiu(sp, sp, -4 * NumToPush);
627   for (int16_t i = 0; i < kNumRegisters; i++) {
628     if ((regs & (1 << i)) != 0) {
629       sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
630     }
631   }
632 }
633 
634 
635 void MacroAssembler::MultiPop(RegList regs) {
636   int16_t NumSaved = 0;
637 
638   for (int16_t i = 0; i < kNumRegisters; i++) {
639     if ((regs & (1 << i)) != 0) {
640       lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
641     }
642   }
643   addiu(sp, sp, 4 * NumSaved);
644 }
645 
646 
647 void MacroAssembler::MultiPopReversed(RegList regs) {
648   int16_t NumSaved = 0;
649 
650   for (int16_t i = kNumRegisters; i > 0; i--) {
651     if ((regs & (1 << i)) != 0) {
652       lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
653     }
654   }
655   addiu(sp, sp, 4 * NumSaved);
656 }
657 
658 
659 void MacroAssembler::Ext(Register rt,
660                          Register rs,
661                          uint16_t pos,
662                          uint16_t size) {
663   ASSERT(pos < 32);
664   ASSERT(pos + size < 32);
665 
666   if (mips32r2) {
667     ext_(rt, rs, pos, size);
668   } else {
669     // Move rs to rt and shift it left then right to get the
670     // desired bitfield on the right side and zeroes on the left.
671     sll(rt, rs, 32 - (pos + size));
672     srl(rt, rt, 32 - size);
673   }
674 }
675 
676 
677 void MacroAssembler::Ins(Register rt,
678                          Register rs,
679                          uint16_t pos,
680                          uint16_t size) {
681   ASSERT(pos < 32);
682   ASSERT(pos + size < 32);
683 
684   if (mips32r2) {
685     ins_(rt, rs, pos, size);
686   } else {
687     ASSERT(!rt.is(t8) && !rs.is(t8));
688 
689     srl(t8, rt, pos + size);
690     // The left chunk from rt that needs to
691     // be saved is on the right side of t8.
692     sll(at, t8, pos + size);
693     // The 'at' register now contains the left chunk on
694     // the left (proper position) and zeroes.
695     sll(t8, rt, 32 - pos);
696     // t8 now contains the right chunk on the left and zeroes.
697     srl(t8, t8, 32 - pos);
698     // t8 now contains the right chunk on
699     // the right (proper position) and zeroes.
700     or_(rt, at, t8);
701     // rt now contains the left and right chunks from the original rt
702     // in their proper position and zeroes in the middle.
703     sll(t8, rs, 32 - size);
704     // t8 now contains the chunk from rs on the left and zeroes.
705     srl(t8, t8, 32 - size - pos);
706     // t8 now contains the original chunk from rs in
707     // the middle (proper position).
708     or_(rt, rt, t8);
709     // rt now contains the result of the ins instruction in R2 mode.
710   }
711 }
712 
713 
714 void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
715   // Move the data from fs to t4.
716   mfc1(t4, fs);
717   return Cvt_d_uw(fd, t4);
718 }
719 
720 
721 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
722   // Convert rs to a FP value in fd (and fd + 1).
723   // We do this by converting rs minus the MSB to avoid sign conversion,
724   // then adding 2^31-1 and 1 to the result.
725 
726   ASSERT(!fd.is(f20));
727   ASSERT(!rs.is(t9));
728   ASSERT(!rs.is(t8));
729 
730   // Save rs's MSB to t8
731   And(t8, rs, 0x80000000);
732   // Remove rs's MSB.
733   And(t9, rs, 0x7FFFFFFF);
734   // Move t9 to fd
735   mtc1(t9, fd);
736 
737   // Convert fd to a real FP value.
738   cvt_d_w(fd, fd);
739 
740   Label conversion_done;
741 
742   // If rs's MSB was 0, it's done.
743   // Otherwise we need to add that to the FP register.
744   Branch(&conversion_done, eq, t8, Operand(zero_reg));
745 
746   // First load 2^31 - 1 into f20.
747   Or(t9, zero_reg, 0x7FFFFFFF);
748   mtc1(t9, f20);
749 
750   // Convert it to FP and add it to fd.
751   cvt_d_w(f20, f20);
752   add_d(fd, fd, f20);
753   // Now add 1.
754   Or(t9, zero_reg, 1);
755   mtc1(t9, f20);
756 
757   cvt_d_w(f20, f20);
758   add_d(fd, fd, f20);
759   bind(&conversion_done);
760 }
761 
762 
763 void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
764   Trunc_uw_d(fs, t4);
765   mtc1(t4, fd);
766 }
767 
768 
769 void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
770   ASSERT(!fd.is(f22));
771   ASSERT(!rs.is(t6));
772 
773   // Load 2^31 into f22.
774   Or(t6, zero_reg, 0x80000000);
775   Cvt_d_uw(f22, t6);
776 
777   // Test if f22 > fd.
778   c(OLT, D, fd, f22);
779 
780   Label simple_convert;
781   // If fd < 2^31 we can convert it normally.
782   bc1t(&simple_convert);
783 
784   // First we subtract 2^31 from fd, then trunc it to rs
785   // and add 2^31 to rs.
786 
787   sub_d(f22, fd, f22);
788   trunc_w_d(f22, f22);
789   mfc1(rs, f22);
790   or_(rs, rs, t6);
791 
792   Label done;
793   Branch(&done);
794   // Simple conversion.
795   bind(&simple_convert);
796   trunc_w_d(f22, fd);
797   mfc1(rs, f22);
798 
799   bind(&done);
800 }
801 
802 
803 // Tries to get a signed int32 out of a double precision floating point heap
804 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
805 // 32bits signed integer range.
806 // This method implementation differs from the ARM version for performance
807 // reasons.
808 void MacroAssembler::ConvertToInt32(Register source,
809                                     Register dest,
810                                     Register scratch,
811                                     Register scratch2,
812                                     FPURegister double_scratch,
813                                     Label *not_int32) {
814   Label right_exponent, done;
815   // Get exponent word (ENDIAN issues).
816   lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
817   // Get exponent alone in scratch2.
818   And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
819   // Load dest with zero.  We use this either for the final shift or
820   // for the answer.
821   mov(dest, zero_reg);
822   // Check whether the exponent matches a 32 bit signed int that is not a Smi.
823   // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
824   // the exponent that we are fastest at and also the highest exponent we can
825   // handle here.
826   const uint32_t non_smi_exponent =
827       (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
828   // If we have a match of the int32-but-not-Smi exponent then skip some logic.
829   Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
830   // If the exponent is higher than that then go to not_int32 case.  This
831   // catches numbers that don't fit in a signed int32, infinities and NaNs.
832   Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
833 
834   // We know the exponent is smaller than 30 (biased).  If it is less than
835   // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
836   // it rounds to zero.
837   const uint32_t zero_exponent =
838       (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
839   Subu(scratch2, scratch2, Operand(zero_exponent));
840   // Dest already has a Smi zero.
841   Branch(&done, lt, scratch2, Operand(zero_reg));
842   if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
843     // We have a shifted exponent between 0 and 30 in scratch2.
844     srl(dest, scratch2, HeapNumber::kExponentShift);
845     // We now have the exponent in dest.  Subtract from 30 to get
846     // how much to shift down.
847     li(at, Operand(30));
848     subu(dest, at, dest);
849   }
850   bind(&right_exponent);
851   if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
852     CpuFeatures::Scope scope(FPU);
853     // MIPS FPU instructions implementing double precision to integer
854     // conversion using round to zero. Since the FP value was qualified
855     // above, the resulting integer should be a legal int32.
856     // The original 'Exponent' word is still in scratch.
857     lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
858     mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
859     trunc_w_d(double_scratch, double_scratch);
860     mfc1(dest, double_scratch);
861   } else {
862     // On entry, dest has final downshift, scratch has original sign/exp/mant.
863     // Save sign bit in top bit of dest.
864     And(scratch2, scratch, Operand(0x80000000));
865     Or(dest, dest, Operand(scratch2));
866     // Put back the implicit 1, just above mantissa field.
867     Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
868 
869     // Shift up the mantissa bits to take up the space the exponent used to
870     // take. We just orred in the implicit bit so that took care of one and
871     // we want to leave the sign bit 0 so we subtract 2 bits from the shift
872     // distance. But we want to clear the sign-bit so shift one more bit
873     // left, then shift right one bit.
874     const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
875     sll(scratch, scratch, shift_distance + 1);
876     srl(scratch, scratch, 1);
877 
878     // Get the second half of the double. For some exponents we don't
879     // actually need this because the bits get shifted out again, but
880     // it's probably slower to test than just to do it.
881     lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
882     // Extract the top 10 bits of the low mantissa word and insert them into
883     // the bottom 10 bits of scratch. The field width equals the shift amount above.
884     const int field_width = shift_distance;
885     Ext(scratch2, scratch2, 32-shift_distance, field_width);
886     Ins(scratch, scratch2, 0, field_width);
887     // Move down according to the exponent.
888     srlv(scratch, scratch, dest);
889     // Prepare the negative version of our integer.
890     subu(scratch2, zero_reg, scratch);
891     // Trick to check sign bit (msb) held in dest, count leading zero.
892     // 0 indicates negative, save negative version with conditional move.
893     clz(dest, dest);
894     movz(scratch, scratch2, dest);
895     mov(dest, scratch);
896   }
897   bind(&done);
898 }
899 
900 
901 // Emulated conditional branches do not emit a nop in the branch delay slot.
902 //
903 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
904 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
905     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
906     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
907 
908 
909 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
910   b(offset);
911 
912   // Emit a nop in the branch delay slot if required.
913   if (bdslot == PROTECT)
914     nop();
915 }
916 
917 
918 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
919                             const Operand& rt,
920                             BranchDelaySlot bdslot) {
921   BRANCH_ARGS_CHECK(cond, rs, rt);
922   ASSERT(!rs.is(zero_reg));
923   Register r2 = no_reg;
924   Register scratch = at;
925 
926   if (rt.is_reg()) {
927     // We don't want any other register but scratch clobbered.
928     ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
929     r2 = rt.rm_;
930     switch (cond) {
931       case cc_always:
932         b(offset);
933         break;
934       case eq:
935         beq(rs, r2, offset);
936         break;
937       case ne:
938         bne(rs, r2, offset);
939         break;
940       // Signed comparison
941       case greater:
942         if (r2.is(zero_reg)) {
943           bgtz(rs, offset);
944         } else {
945           slt(scratch, r2, rs);
946           bne(scratch, zero_reg, offset);
947         }
948         break;
949       case greater_equal:
950         if (r2.is(zero_reg)) {
951           bgez(rs, offset);
952         } else {
953           slt(scratch, rs, r2);
954           beq(scratch, zero_reg, offset);
955         }
956         break;
957       case less:
958         if (r2.is(zero_reg)) {
959           bltz(rs, offset);
960         } else {
961           slt(scratch, rs, r2);
962           bne(scratch, zero_reg, offset);
963         }
964         break;
965       case less_equal:
966         if (r2.is(zero_reg)) {
967           blez(rs, offset);
968         } else {
969           slt(scratch, r2, rs);
970           beq(scratch, zero_reg, offset);
971         }
972         break;
973       // Unsigned comparison.
974       case Ugreater:
975         if (r2.is(zero_reg)) {
976           bgtz(rs, offset);
977         } else {
978           sltu(scratch, r2, rs);
979           bne(scratch, zero_reg, offset);
980         }
981         break;
982       case Ugreater_equal:
983         if (r2.is(zero_reg)) {
984           bgez(rs, offset);
985         } else {
986           sltu(scratch, rs, r2);
987           beq(scratch, zero_reg, offset);
988         }
989         break;
990       case Uless:
991         if (r2.is(zero_reg)) {
992           b(offset);
993         } else {
994           sltu(scratch, rs, r2);
995           bne(scratch, zero_reg, offset);
996         }
997         break;
998       case Uless_equal:
999         if (r2.is(zero_reg)) {
1000           b(offset);
1001         } else {
1002           sltu(scratch, r2, rs);
1003           beq(scratch, zero_reg, offset);
1004         }
1005         break;
1006       default:
1007         UNREACHABLE();
1008     }
1009   } else {
1010     // Be careful to always use shifted_branch_offset only just before the
1011     // branch instruction, as the location will be remembered for patching the
1012     // target.
1013     switch (cond) {
1014       case cc_always:
1015         b(offset);
1016         break;
1017       case eq:
1018         // We don't want any other register but scratch clobbered.
1019         ASSERT(!scratch.is(rs));
1020         r2 = scratch;
1021         li(r2, rt);
1022         beq(rs, r2, offset);
1023         break;
1024       case ne:
1025         // We don't want any other register but scratch clobbered.
1026         ASSERT(!scratch.is(rs));
1027         r2 = scratch;
1028         li(r2, rt);
1029         bne(rs, r2, offset);
1030         break;
1031       // Signed comparison
1032       case greater:
1033         if (rt.imm32_ == 0) {
1034           bgtz(rs, offset);
1035         } else {
1036           r2 = scratch;
1037           li(r2, rt);
1038           slt(scratch, r2, rs);
1039           bne(scratch, zero_reg, offset);
1040         }
1041         break;
1042       case greater_equal:
1043         if (rt.imm32_ == 0) {
1044           bgez(rs, offset);
1045         } else if (is_int16(rt.imm32_)) {
1046           slti(scratch, rs, rt.imm32_);
1047           beq(scratch, zero_reg, offset);
1048         } else {
1049           r2 = scratch;
1050           li(r2, rt);
1051           sltu(scratch, rs, r2);
1052           beq(scratch, zero_reg, offset);
1053         }
1054         break;
1055       case less:
1056         if (rt.imm32_ == 0) {
1057           bltz(rs, offset);
1058         } else if (is_int16(rt.imm32_)) {
1059           slti(scratch, rs, rt.imm32_);
1060           bne(scratch, zero_reg, offset);
1061         } else {
1062           r2 = scratch;
1063           li(r2, rt);
1064           slt(scratch, rs, r2);
1065           bne(scratch, zero_reg, offset);
1066         }
1067         break;
1068       case less_equal:
1069         if (rt.imm32_ == 0) {
1070           blez(rs, offset);
1071         } else {
1072           r2 = scratch;
1073           li(r2, rt);
1074           slt(scratch, r2, rs);
1075           beq(scratch, zero_reg, offset);
1076        }
1077        break;
1078       // Unsigned comparison.
1079       case Ugreater:
1080         if (rt.imm32_ == 0) {
1081           bgtz(rs, offset);
1082         } else {
1083           r2 = scratch;
1084           li(r2, rt);
1085           sltu(scratch, r2, rs);
1086           bne(scratch, zero_reg, offset);
1087         }
1088         break;
1089       case Ugreater_equal:
1090         if (rt.imm32_ == 0) {
1091           bgez(rs, offset);
1092         } else if (is_int16(rt.imm32_)) {
1093           sltiu(scratch, rs, rt.imm32_);
1094           beq(scratch, zero_reg, offset);
1095         } else {
1096           r2 = scratch;
1097           li(r2, rt);
1098           sltu(scratch, rs, r2);
1099           beq(scratch, zero_reg, offset);
1100         }
1101         break;
1102       case Uless:
1103         if (rt.imm32_ == 0) {
1104           b(offset);
1105         } else if (is_int16(rt.imm32_)) {
1106           sltiu(scratch, rs, rt.imm32_);
1107           bne(scratch, zero_reg, offset);
1108         } else {
1109           r2 = scratch;
1110           li(r2, rt);
1111           sltu(scratch, rs, r2);
1112           bne(scratch, zero_reg, offset);
1113         }
1114         break;
1115       case Uless_equal:
1116         if (rt.imm32_ == 0) {
1117           b(offset);
1118         } else {
1119           r2 = scratch;
1120           li(r2, rt);
1121           sltu(scratch, r2, rs);
1122           beq(scratch, zero_reg, offset);
1123         }
1124         break;
1125       default:
1126         UNREACHABLE();
1127     }
1128   }
1129   // Emit a nop in the branch delay slot if required.
1130   if (bdslot == PROTECT)
1131     nop();
1132 }
1133 
1134 
1135 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1136   // We use branch_offset as an argument for the branch instructions to be sure
1137   // it is called just before generating the branch instruction, as needed.
1138 
1139   b(shifted_branch_offset(L, false));
1140 
1141   // Emit a nop in the branch delay slot if required.
1142   if (bdslot == PROTECT)
1143     nop();
1144 }
1145 
1146 
1147 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1148                             const Operand& rt,
1149                             BranchDelaySlot bdslot) {
1150   BRANCH_ARGS_CHECK(cond, rs, rt);
1151 
1152   int32_t offset;
1153   Register r2 = no_reg;
1154   Register scratch = at;
1155   if (rt.is_reg()) {
1156     r2 = rt.rm_;
1157     // Be careful to always use shifted_branch_offset only just before the
1158     // branch instruction, as the location will be remembered for patching the
1159     // target.
1160     switch (cond) {
1161       case cc_always:
1162         offset = shifted_branch_offset(L, false);
1163         b(offset);
1164         break;
1165       case eq:
1166         offset = shifted_branch_offset(L, false);
1167         beq(rs, r2, offset);
1168         break;
1169       case ne:
1170         offset = shifted_branch_offset(L, false);
1171         bne(rs, r2, offset);
1172         break;
1173       // Signed comparison
1174       case greater:
1175         if (r2.is(zero_reg)) {
1176           offset = shifted_branch_offset(L, false);
1177           bgtz(rs, offset);
1178         } else {
1179           slt(scratch, r2, rs);
1180           offset = shifted_branch_offset(L, false);
1181           bne(scratch, zero_reg, offset);
1182         }
1183         break;
1184       case greater_equal:
1185         if (r2.is(zero_reg)) {
1186           offset = shifted_branch_offset(L, false);
1187           bgez(rs, offset);
1188         } else {
1189           slt(scratch, rs, r2);
1190           offset = shifted_branch_offset(L, false);
1191           beq(scratch, zero_reg, offset);
1192         }
1193         break;
1194       case less:
1195         if (r2.is(zero_reg)) {
1196           offset = shifted_branch_offset(L, false);
1197           bltz(rs, offset);
1198         } else {
1199           slt(scratch, rs, r2);
1200           offset = shifted_branch_offset(L, false);
1201           bne(scratch, zero_reg, offset);
1202         }
1203         break;
1204       case less_equal:
1205         if (r2.is(zero_reg)) {
1206           offset = shifted_branch_offset(L, false);
1207           blez(rs, offset);
1208         } else {
1209           slt(scratch, r2, rs);
1210           offset = shifted_branch_offset(L, false);
1211           beq(scratch, zero_reg, offset);
1212         }
1213         break;
1214       // Unsigned comparison.
1215       case Ugreater:
1216         if (r2.is(zero_reg)) {
1217           offset = shifted_branch_offset(L, false);
1218            bgtz(rs, offset);
1219         } else {
1220           sltu(scratch, r2, rs);
1221           offset = shifted_branch_offset(L, false);
1222           bne(scratch, zero_reg, offset);
1223         }
1224         break;
1225       case Ugreater_equal:
1226         if (r2.is(zero_reg)) {
1227           offset = shifted_branch_offset(L, false);
1228           bgez(rs, offset);
1229         } else {
1230           sltu(scratch, rs, r2);
1231           offset = shifted_branch_offset(L, false);
1232           beq(scratch, zero_reg, offset);
1233         }
1234         break;
1235       case Uless:
1236         if (r2.is(zero_reg)) {
1237           offset = shifted_branch_offset(L, false);
1238           b(offset);
1239         } else {
1240           sltu(scratch, rs, r2);
1241           offset = shifted_branch_offset(L, false);
1242           bne(scratch, zero_reg, offset);
1243         }
1244         break;
1245       case Uless_equal:
1246         if (r2.is(zero_reg)) {
1247           offset = shifted_branch_offset(L, false);
1248           b(offset);
1249         } else {
1250           sltu(scratch, r2, rs);
1251           offset = shifted_branch_offset(L, false);
1252           beq(scratch, zero_reg, offset);
1253         }
1254         break;
1255       default:
1256         UNREACHABLE();
1257     }
1258   } else {
1259     // Be careful to always use shifted_branch_offset only just before the
1260     // branch instruction, as the location will be remembered for patching the
1261     // target.
1262     switch (cond) {
1263       case cc_always:
1264         offset = shifted_branch_offset(L, false);
1265         b(offset);
1266         break;
1267       case eq:
1268         r2 = scratch;
1269         li(r2, rt);
1270         offset = shifted_branch_offset(L, false);
1271         beq(rs, r2, offset);
1272         break;
1273       case ne:
1274         r2 = scratch;
1275         li(r2, rt);
1276         offset = shifted_branch_offset(L, false);
1277         bne(rs, r2, offset);
1278         break;
1279       // Signed comparison
1280       case greater:
1281         if (rt.imm32_ == 0) {
1282           offset = shifted_branch_offset(L, false);
1283           bgtz(rs, offset);
1284         } else {
1285           r2 = scratch;
1286           li(r2, rt);
1287           slt(scratch, r2, rs);
1288           offset = shifted_branch_offset(L, false);
1289           bne(scratch, zero_reg, offset);
1290         }
1291         break;
1292       case greater_equal:
1293         if (rt.imm32_ == 0) {
1294           offset = shifted_branch_offset(L, false);
1295           bgez(rs, offset);
1296         } else if (is_int16(rt.imm32_)) {
1297           slti(scratch, rs, rt.imm32_);
1298           offset = shifted_branch_offset(L, false);
1299           beq(scratch, zero_reg, offset);
1300         } else {
1301           r2 = scratch;
1302           li(r2, rt);
1303           sltu(scratch, rs, r2);
1304           offset = shifted_branch_offset(L, false);
1305           beq(scratch, zero_reg, offset);
1306         }
1307         break;
1308       case less:
1309         if (rt.imm32_ == 0) {
1310           offset = shifted_branch_offset(L, false);
1311           bltz(rs, offset);
1312         } else if (is_int16(rt.imm32_)) {
1313           slti(scratch, rs, rt.imm32_);
1314           offset = shifted_branch_offset(L, false);
1315           bne(scratch, zero_reg, offset);
1316         } else {
1317           r2 = scratch;
1318           li(r2, rt);
1319           slt(scratch, rs, r2);
1320           offset = shifted_branch_offset(L, false);
1321           bne(scratch, zero_reg, offset);
1322         }
1323         break;
1324       case less_equal:
1325         if (rt.imm32_ == 0) {
1326           offset = shifted_branch_offset(L, false);
1327           blez(rs, offset);
1328         } else {
1329           r2 = scratch;
1330           li(r2, rt);
1331           slt(scratch, r2, rs);
1332           offset = shifted_branch_offset(L, false);
1333           beq(scratch, zero_reg, offset);
1334         }
1335         break;
1336       // Unsigned comparison.
1337       case Ugreater:
1338         if (rt.imm32_ == 0) {
1339           offset = shifted_branch_offset(L, false);
1340           bgtz(rs, offset);
1341         } else {
1342           r2 = scratch;
1343           li(r2, rt);
1344           sltu(scratch, r2, rs);
1345           offset = shifted_branch_offset(L, false);
1346           bne(scratch, zero_reg, offset);
1347         }
1348         break;
1349       case Ugreater_equal:
1350         if (rt.imm32_ == 0) {
1351           offset = shifted_branch_offset(L, false);
1352           bgez(rs, offset);
1353         } else if (is_int16(rt.imm32_)) {
1354           sltiu(scratch, rs, rt.imm32_);
1355           offset = shifted_branch_offset(L, false);
1356           beq(scratch, zero_reg, offset);
1357         } else {
1358           r2 = scratch;
1359           li(r2, rt);
1360           sltu(scratch, rs, r2);
1361           offset = shifted_branch_offset(L, false);
1362           beq(scratch, zero_reg, offset);
1363         }
1364         break;
1365      case Uless:
1366         if (rt.imm32_ == 0) {
1367           offset = shifted_branch_offset(L, false);
1368           b(offset);
1369         } else if (is_int16(rt.imm32_)) {
1370           sltiu(scratch, rs, rt.imm32_);
1371           offset = shifted_branch_offset(L, false);
1372           bne(scratch, zero_reg, offset);
1373         } else {
1374           r2 = scratch;
1375           li(r2, rt);
1376           sltu(scratch, rs, r2);
1377           offset = shifted_branch_offset(L, false);
1378           bne(scratch, zero_reg, offset);
1379         }
1380         break;
1381       case Uless_equal:
1382         if (rt.imm32_ == 0) {
1383           offset = shifted_branch_offset(L, false);
1384           b(offset);
1385         } else {
1386           r2 = scratch;
1387           li(r2, rt);
1388           sltu(scratch, r2, rs);
1389           offset = shifted_branch_offset(L, false);
1390           beq(scratch, zero_reg, offset);
1391         }
1392         break;
1393       default:
1394         UNREACHABLE();
1395     }
1396   }
1397   // Check that offset actually fits in an int16_t.
1398   ASSERT(is_int16(offset));
1399   // Emit a nop in the branch delay slot if required.
1400   if (bdslot == PROTECT)
1401     nop();
1402 }
1403 
1404 
1405 // We need to use a bgezal or bltzal, but they can't be used directly with the
1406 // slt instructions. We could use sub or add instead but we would miss overflow
1407 // cases, so we keep slt and add an intermediate third instruction.
1408 void MacroAssembler::BranchAndLink(int16_t offset,
1409                                    BranchDelaySlot bdslot) {
1410   bal(offset);
1411 
1412   // Emit a nop in the branch delay slot if required.
1413   if (bdslot == PROTECT)
1414     nop();
1415 }
1416 
1417 
1418 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1419                                    const Operand& rt,
1420                                    BranchDelaySlot bdslot) {
1421   BRANCH_ARGS_CHECK(cond, rs, rt);
1422   Register r2 = no_reg;
1423   Register scratch = at;
1424 
1425   if (rt.is_reg()) {
1426     r2 = rt.rm_;
1427   } else if (cond != cc_always) {
1428     r2 = scratch;
1429     li(r2, rt);
1430   }
1431 
1432   switch (cond) {
1433     case cc_always:
1434       bal(offset);
1435       break;
1436     case eq:
1437       bne(rs, r2, 2);
1438       nop();
1439       bal(offset);
1440       break;
1441     case ne:
1442       beq(rs, r2, 2);
1443       nop();
1444       bal(offset);
1445       break;
1446 
1447     // Signed comparison
1448     case greater:
1449       slt(scratch, r2, rs);
1450       addiu(scratch, scratch, -1);
1451       bgezal(scratch, offset);
1452       break;
1453     case greater_equal:
1454       slt(scratch, rs, r2);
1455       addiu(scratch, scratch, -1);
1456       bltzal(scratch, offset);
1457       break;
1458     case less:
1459       slt(scratch, rs, r2);
1460       addiu(scratch, scratch, -1);
1461       bgezal(scratch, offset);
1462       break;
1463     case less_equal:
1464       slt(scratch, r2, rs);
1465       addiu(scratch, scratch, -1);
1466       bltzal(scratch, offset);
1467       break;
1468 
1469     // Unsigned comparison.
1470     case Ugreater:
1471       sltu(scratch, r2, rs);
1472       addiu(scratch, scratch, -1);
1473       bgezal(scratch, offset);
1474       break;
1475     case Ugreater_equal:
1476       sltu(scratch, rs, r2);
1477       addiu(scratch, scratch, -1);
1478       bltzal(scratch, offset);
1479       break;
1480     case Uless:
1481       sltu(scratch, rs, r2);
1482       addiu(scratch, scratch, -1);
1483       bgezal(scratch, offset);
1484       break;
1485     case Uless_equal:
1486       sltu(scratch, r2, rs);
1487       addiu(scratch, scratch, -1);
1488       bltzal(scratch, offset);
1489       break;
1490 
1491     default:
1492       UNREACHABLE();
1493   }
1494   // Emit a nop in the branch delay slot if required.
1495   if (bdslot == PROTECT)
1496     nop();
1497 }
1498 
1499 
1500 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1501   bal(shifted_branch_offset(L, false));
1502 
1503   // Emit a nop in the branch delay slot if required.
1504   if (bdslot == PROTECT)
1505     nop();
1506 }
1507 
1508 
1509 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1510                                    const Operand& rt,
1511                                    BranchDelaySlot bdslot) {
1512   BRANCH_ARGS_CHECK(cond, rs, rt);
1513 
1514   int32_t offset;
1515   Register r2 = no_reg;
1516   Register scratch = at;
1517   if (rt.is_reg()) {
1518     r2 = rt.rm_;
1519   } else if (cond != cc_always) {
1520     r2 = scratch;
1521     li(r2, rt);
1522   }
1523 
1524   switch (cond) {
1525     case cc_always:
1526       offset = shifted_branch_offset(L, false);
1527       bal(offset);
1528       break;
1529     case eq:
1530       bne(rs, r2, 2);
1531       nop();
1532       offset = shifted_branch_offset(L, false);
1533       bal(offset);
1534       break;
1535     case ne:
1536       beq(rs, r2, 2);
1537       nop();
1538       offset = shifted_branch_offset(L, false);
1539       bal(offset);
1540       break;
1541 
1542     // Signed comparison
1543     case greater:
1544       slt(scratch, r2, rs);
1545       addiu(scratch, scratch, -1);
1546       offset = shifted_branch_offset(L, false);
1547       bgezal(scratch, offset);
1548       break;
1549     case greater_equal:
1550       slt(scratch, rs, r2);
1551       addiu(scratch, scratch, -1);
1552       offset = shifted_branch_offset(L, false);
1553       bltzal(scratch, offset);
1554       break;
1555     case less:
1556       slt(scratch, rs, r2);
1557       addiu(scratch, scratch, -1);
1558       offset = shifted_branch_offset(L, false);
1559       bgezal(scratch, offset);
1560       break;
1561     case less_equal:
1562       slt(scratch, r2, rs);
1563       addiu(scratch, scratch, -1);
1564       offset = shifted_branch_offset(L, false);
1565       bltzal(scratch, offset);
1566       break;
1567 
1568     // Unsigned comparison.
1569     case Ugreater:
1570       sltu(scratch, r2, rs);
1571       addiu(scratch, scratch, -1);
1572       offset = shifted_branch_offset(L, false);
1573       bgezal(scratch, offset);
1574       break;
1575     case Ugreater_equal:
1576       sltu(scratch, rs, r2);
1577       addiu(scratch, scratch, -1);
1578       offset = shifted_branch_offset(L, false);
1579       bltzal(scratch, offset);
1580       break;
1581     case Uless:
1582       sltu(scratch, rs, r2);
1583       addiu(scratch, scratch, -1);
1584       offset = shifted_branch_offset(L, false);
1585       bgezal(scratch, offset);
1586       break;
1587     case Uless_equal:
1588       sltu(scratch, r2, rs);
1589       addiu(scratch, scratch, -1);
1590       offset = shifted_branch_offset(L, false);
1591       bltzal(scratch, offset);
1592       break;
1593 
1594     default:
1595       UNREACHABLE();
1596   }
1597 
1598   // Check that offset actually fits in an int16_t.
1599   ASSERT(is_int16(offset));
1600 
1601   // Emit a nop in the branch delay slot if required.
1602   if (bdslot == PROTECT)
1603     nop();
1604 }
1605 
1606 
1607 void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
1608   BlockTrampolinePoolScope block_trampoline_pool(this);
1609   if (target.is_reg()) {
1610       jr(target.rm());
1611   } else {
1612     if (!MustUseReg(target.rmode_)) {
1613         j(target.imm32_);
1614     } else {
1615       li(t9, target);
1616       jr(t9);
1617     }
1618   }
1619   // Emit a nop in the branch delay slot if required.
1620   if (bdslot == PROTECT)
1621     nop();
1622 }
1623 
1624 
1625 void MacroAssembler::Jump(const Operand& target,
1626                           Condition cond, Register rs, const Operand& rt,
1627                           BranchDelaySlot bdslot) {
1628   BlockTrampolinePoolScope block_trampoline_pool(this);
1629   BRANCH_ARGS_CHECK(cond, rs, rt);
1630   if (target.is_reg()) {
1631     if (cond == cc_always) {
1632       jr(target.rm());
1633     } else {
1634       Branch(2, NegateCondition(cond), rs, rt);
1635       jr(target.rm());
1636     }
1637   } else {  // Not register target.
1638     if (!MustUseReg(target.rmode_)) {
1639       if (cond == cc_always) {
1640         j(target.imm32_);
1641       } else {
1642         Branch(2, NegateCondition(cond), rs, rt);
1643         j(target.imm32_);  // Will generate only one instruction.
1644       }
1645     } else {  // MustUseReg(target)
1646       li(t9, target);
1647       if (cond == cc_always) {
1648         jr(t9);
1649       } else {
1650         Branch(2, NegateCondition(cond), rs, rt);
1651         jr(t9);  // Will generate only one instruction.
1652       }
1653     }
1654   }
1655   // Emit a nop in the branch delay slot if required.
1656   if (bdslot == PROTECT)
1657     nop();
1658 }
1659 
1660 
1661 // Note: To call gcc-compiled C code on mips, you must call thru t9.
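// (Under the MIPS o32 PIC calling convention, a position-independent callee
// expects its own entry address in t9 ($25) and uses it to recompute gp, so
// calling through any other register would leave gp wrong in the callee.)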
1662 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
1663   BlockTrampolinePoolScope block_trampoline_pool(this);
1664   if (target.is_reg()) {
1665       jalr(target.rm());
1666   } else {    // !target.is_reg()
1667     if (!MustUseReg(target.rmode_)) {
1668       jal(target.imm32_);
1669     } else {  // MustUseReg(target)
1670       li(t9, target);
1671       jalr(t9);
1672     }
1673   }
1674   // Emit a nop in the branch delay slot if required.
1675   if (bdslot == PROTECT)
1676     nop();
1677 }
1678 
1679 
1680 // Note: To call gcc-compiled C code on mips, you must call thru t9.
1681 void MacroAssembler::Call(const Operand& target,
1682                           Condition cond, Register rs, const Operand& rt,
1683                           BranchDelaySlot bdslot) {
1684   BlockTrampolinePoolScope block_trampoline_pool(this);
1685   BRANCH_ARGS_CHECK(cond, rs, rt);
1686   if (target.is_reg()) {
1687     if (cond == cc_always) {
1688       jalr(target.rm());
1689     } else {
1690       Branch(2, NegateCondition(cond), rs, rt);
1691       jalr(target.rm());
1692     }
1693   } else {    // !target.is_reg()
1694     if (!MustUseReg(target.rmode_)) {
1695       if (cond == cc_always) {
1696         jal(target.imm32_);
1697       } else {
1698         Branch(2, NegateCondition(cond), rs, rt);
1699         jal(target.imm32_);  // Will generate only one instruction.
1700       }
1701     } else {  // MustUseReg(target)
1702       li(t9, target);
1703       if (cond == cc_always) {
1704         jalr(t9);
1705       } else {
1706         Branch(2, NegateCondition(cond), rs, rt);
1707         jalr(t9);  // Will generate only one instruction.
1708       }
1709     }
1710   }
1711   // Emit a nop in the branch delay slot if required.
1712   if (bdslot == PROTECT)
1713     nop();
1714 }
1715 
1716 
1717 void MacroAssembler::Drop(int count,
1718                           Condition cond,
1719                           Register reg,
1720                           const Operand& op) {
1721   if (count <= 0) {
1722     return;
1723   }
1724 
1725   Label skip;
1726 
1727   if (cond != al) {
1728     Branch(&skip, NegateCondition(cond), reg, op);
1729   }
1730 
1731   if (count > 0) {
1732     addiu(sp, sp, count * kPointerSize);
1733   }
1734 
1735   if (cond != al) {
1736     bind(&skip);
1737   }
1738 }
1739 
1740 
1741 void MacroAssembler::DropAndRet(int drop,
1742                                 Condition cond,
1743                                 Register r1,
1744                                 const Operand& r2) {
1745   // This is a workaround to make sure only one branch instruction is
1746   // generated. It relies on Drop and Ret not creating branches if
1747   // cond == cc_always.
1748   Label skip;
1749   if (cond != cc_always) {
1750     Branch(&skip, NegateCondition(cond), r1, r2);
1751   }
1752 
1753   Drop(drop);
1754   Ret();
1755 
1756   if (cond != cc_always) {
1757     bind(&skip);
1758   }
1759 }
1760 
1761 
1762 void MacroAssembler::Swap(Register reg1,
1763                           Register reg2,
1764                           Register scratch) {
1765   if (scratch.is(no_reg)) {
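    // Classic three-XOR swap; note that it assumes reg1 and reg2 are distinct
    // registers, since XOR-swapping a register with itself would clear it.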
1766     Xor(reg1, reg1, Operand(reg2));
1767     Xor(reg2, reg2, Operand(reg1));
1768     Xor(reg1, reg1, Operand(reg2));
1769   } else {
1770     mov(scratch, reg1);
1771     mov(reg1, reg2);
1772     mov(reg2, scratch);
1773   }
1774 }
1775 
1776 
1777 void MacroAssembler::Call(Label* target) {
1778   BranchAndLink(target);
1779 }
1780 
1781 
1782 void MacroAssembler::Move(Register dst, Register src) {
1783   if (!dst.is(src)) {
1784     mov(dst, src);
1785   }
1786 }
1787 
1788 
1789 #ifdef ENABLE_DEBUGGER_SUPPORT
1790 
1791 void MacroAssembler::DebugBreak() {
1792   ASSERT(allow_stub_calls());
1793   mov(a0, zero_reg);
1794   li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1795   CEntryStub ces(1);
1796   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1797 }
1798 
1799 #endif  // ENABLE_DEBUGGER_SUPPORT
1800 
1801 
1802 // ---------------------------------------------------------------------------
1803 // Exception handling
1804 
1805 void MacroAssembler::PushTryHandler(CodeLocation try_location,
1806                                     HandlerType type) {
1807   // Adjust this code if not the case.
1808   ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1809   // The return address is passed in register ra.
1810   if (try_location == IN_JAVASCRIPT) {
1811     if (type == TRY_CATCH_HANDLER) {
1812       li(t0, Operand(StackHandler::TRY_CATCH));
1813     } else {
1814       li(t0, Operand(StackHandler::TRY_FINALLY));
1815     }
1816     ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
1817            && StackHandlerConstants::kFPOffset == 2 * kPointerSize
1818            && StackHandlerConstants::kPCOffset == 3 * kPointerSize
1819            && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
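    // The stores below build a handler frame with this layout (relative to
    // the new sp): +0 next handler, +4 state, +8 fp, +12 return address (ra).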
1820     // Save the current handler as the next handler.
1821     li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1822     lw(t1, MemOperand(t2));
1823 
1824     addiu(sp, sp, -StackHandlerConstants::kSize);
1825     sw(ra, MemOperand(sp, 12));
1826     sw(fp, MemOperand(sp, 8));
1827     sw(t0, MemOperand(sp, 4));
1828     sw(t1, MemOperand(sp, 0));
1829 
1830     // Link this handler as the new current one.
1831     sw(sp, MemOperand(t2));
1832 
1833   } else {
1834     // Must preserve a0-a3, and s0 (argv).
1835     ASSERT(try_location == IN_JS_ENTRY);
1836     ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
1837            && StackHandlerConstants::kFPOffset == 2 * kPointerSize
1838            && StackHandlerConstants::kPCOffset == 3 * kPointerSize
1839            && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1840 
1841     // The frame pointer does not point to a JS frame so we save NULL
1842     // for fp. We expect the code throwing an exception to check fp
1843     // before dereferencing it to restore the context.
1844     li(t0, Operand(StackHandler::ENTRY));
1845 
1846     // Save the current handler as the next handler.
1847     li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1848     lw(t1, MemOperand(t2));
1849 
1850     addiu(sp, sp, -StackHandlerConstants::kSize);
1851     sw(ra, MemOperand(sp, 12));
1852     sw(zero_reg, MemOperand(sp, 8));
1853     sw(t0, MemOperand(sp, 4));
1854     sw(t1, MemOperand(sp, 0));
1855 
1856     // Link this handler as the new current one.
1857     sw(sp, MemOperand(t2));
1858   }
1859 }
1860 
1861 
1862 void MacroAssembler::PopTryHandler() {
1863   ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1864   pop(a1);
1865   Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1866   li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
1867   sw(a1, MemOperand(at));
1868 }
1869 
1870 
1871 void MacroAssembler::AllocateInNewSpace(int object_size,
1872                                         Register result,
1873                                         Register scratch1,
1874                                         Register scratch2,
1875                                         Label* gc_required,
1876                                         AllocationFlags flags) {
1877   if (!FLAG_inline_new) {
1878     if (FLAG_debug_code) {
1879       // Trash the registers to simulate an allocation failure.
1880       li(result, 0x7091);
1881       li(scratch1, 0x7191);
1882       li(scratch2, 0x7291);
1883     }
1884     jmp(gc_required);
1885     return;
1886   }
1887 
1888   ASSERT(!result.is(scratch1));
1889   ASSERT(!result.is(scratch2));
1890   ASSERT(!scratch1.is(scratch2));
1891   ASSERT(!scratch1.is(t9));
1892   ASSERT(!scratch2.is(t9));
1893   ASSERT(!result.is(t9));
1894 
1895   // Make object size into bytes.
1896   if ((flags & SIZE_IN_WORDS) != 0) {
1897     object_size *= kPointerSize;
1898   }
1899   ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1900 
1901   // Check relative positions of allocation top and limit addresses.
1902   // ARM adds additional checks to make sure the ldm instruction can be
1903   // used. On MIPS we don't have ldm so we don't need additional checks either.
1904   ExternalReference new_space_allocation_top =
1905       ExternalReference::new_space_allocation_top_address(isolate());
1906   ExternalReference new_space_allocation_limit =
1907       ExternalReference::new_space_allocation_limit_address(isolate());
1908   intptr_t top   =
1909       reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1910   intptr_t limit =
1911       reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1912   ASSERT((limit - top) == kPointerSize);
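  // Because top and limit are adjacent words, a single register holding the
  // top address can reach both: MemOperand(topaddr) is the allocation top and
  // MemOperand(topaddr, kPointerSize) is the allocation limit.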
1913 
1914   // Set up allocation top address and object size registers.
1915   Register topaddr = scratch1;
1916   Register obj_size_reg = scratch2;
1917   li(topaddr, Operand(new_space_allocation_top));
1918   li(obj_size_reg, Operand(object_size));
1919 
1920   // This code stores a temporary value in t9.
1921   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1922     // Load allocation top into result and allocation limit into t9.
1923     lw(result, MemOperand(topaddr));
1924     lw(t9, MemOperand(topaddr, kPointerSize));
1925   } else {
1926     if (FLAG_debug_code) {
1927       // Assert that result actually contains top on entry. t9 is used
1928       // immediately below, so this use of t9 does not cause a difference in
1929       // register content between debug and release mode.
1930       lw(t9, MemOperand(topaddr));
1931       Check(eq, "Unexpected allocation top", result, Operand(t9));
1932     }
1933     // Load allocation limit into t9. Result already contains allocation top.
1934     lw(t9, MemOperand(topaddr, limit - top));
1935   }
1936 
1937   // Calculate new top and bail out if new space is exhausted. Use result
1938   // to calculate the new top.
1939   Addu(scratch2, result, Operand(obj_size_reg));
1940   Branch(gc_required, Ugreater, scratch2, Operand(t9));
1941   sw(scratch2, MemOperand(topaddr));
1942 
1943   // Tag object if requested.
1944   if ((flags & TAG_OBJECT) != 0) {
1945     Addu(result, result, Operand(kHeapObjectTag));
1946   }
1947 }
1948 
1949 
1950 void MacroAssembler::AllocateInNewSpace(Register object_size,
1951                                         Register result,
1952                                         Register scratch1,
1953                                         Register scratch2,
1954                                         Label* gc_required,
1955                                         AllocationFlags flags) {
1956   if (!FLAG_inline_new) {
1957     if (FLAG_debug_code) {
1958       // Trash the registers to simulate an allocation failure.
1959       li(result, 0x7091);
1960       li(scratch1, 0x7191);
1961       li(scratch2, 0x7291);
1962     }
1963     jmp(gc_required);
1964     return;
1965   }
1966 
1967   ASSERT(!result.is(scratch1));
1968   ASSERT(!result.is(scratch2));
1969   ASSERT(!scratch1.is(scratch2));
1970   ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
1971 
1972   // Check relative positions of allocation top and limit addresses.
1973   // ARM adds additional checks to make sure the ldm instruction can be
1974   // used. On MIPS we don't have ldm so we don't need additional checks either.
1975   ExternalReference new_space_allocation_top =
1976       ExternalReference::new_space_allocation_top_address(isolate());
1977   ExternalReference new_space_allocation_limit =
1978       ExternalReference::new_space_allocation_limit_address(isolate());
1979   intptr_t top   =
1980       reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1981   intptr_t limit =
1982       reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1983   ASSERT((limit - top) == kPointerSize);
1984 
1985   // Set up allocation top address and object size registers.
1986   Register topaddr = scratch1;
1987   li(topaddr, Operand(new_space_allocation_top));
1988 
1989   // This code stores a temporary value in t9.
1990   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1991     // Load allocation top into result and allocation limit into t9.
1992     lw(result, MemOperand(topaddr));
1993     lw(t9, MemOperand(topaddr, kPointerSize));
1994   } else {
1995     if (FLAG_debug_code) {
1996       // Assert that result actually contains top on entry. t9 is used
1997       // immediately below, so this use of t9 does not cause a difference in
1998       // register content between debug and release mode.
1999       lw(t9, MemOperand(topaddr));
2000       Check(eq, "Unexpected allocation top", result, Operand(t9));
2001     }
2002     // Load allocation limit into t9. Result already contains allocation top.
2003     lw(t9, MemOperand(topaddr, limit - top));
2004   }
2005 
2006   // Calculate new top and bail out if new space is exhausted. Use result
2007   // to calculate the new top. Object size may be in words so a shift is
2008   // required to get the number of bytes.
2009   if ((flags & SIZE_IN_WORDS) != 0) {
2010     sll(scratch2, object_size, kPointerSizeLog2);
2011     Addu(scratch2, result, scratch2);
2012   } else {
2013     Addu(scratch2, result, Operand(object_size));
2014   }
2015   Branch(gc_required, Ugreater, scratch2, Operand(t9));
2016 
2017   // Update allocation top. result temporarily holds the new top.
2018   if (FLAG_debug_code) {
2019     And(t9, scratch2, Operand(kObjectAlignmentMask));
2020     Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
2021   }
2022   sw(scratch2, MemOperand(topaddr));
2023 
2024   // Tag object if requested.
2025   if ((flags & TAG_OBJECT) != 0) {
2026     Addu(result, result, Operand(kHeapObjectTag));
2027   }
2028 }
2029 
2030 
2031 void MacroAssembler::UndoAllocationInNewSpace(Register object,
2032                                               Register scratch) {
2033   ExternalReference new_space_allocation_top =
2034       ExternalReference::new_space_allocation_top_address(isolate());
2035 
2036   // Make sure the object has no tag before resetting top.
2037   And(object, object, Operand(~kHeapObjectTagMask));
2038 #ifdef DEBUG
2039   // Check that the object being un-allocated is below the current top.
2040   li(scratch, Operand(new_space_allocation_top));
2041   lw(scratch, MemOperand(scratch));
2042   Check(less, "Undo allocation of non allocated memory",
2043       object, Operand(scratch));
2044 #endif
2045   // Write the address of the object to un-allocate as the current top.
2046   li(scratch, Operand(new_space_allocation_top));
2047   sw(object, MemOperand(scratch));
2048 }
2049 
2050 
2051 void MacroAssembler::AllocateTwoByteString(Register result,
2052                                            Register length,
2053                                            Register scratch1,
2054                                            Register scratch2,
2055                                            Register scratch3,
2056                                            Label* gc_required) {
2057   // Calculate the number of bytes needed for the characters in the string
2058   // while observing object alignment.
2059   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2060   sll(scratch1, length, 1);  // Length in bytes, not chars.
2061   addiu(scratch1, scratch1,
2062        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
2063   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
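  // scratch1 now holds the allocation size in bytes:
  //   (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
  //       & ~kObjectAlignmentMask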
2064 
2065   // Allocate two-byte string in new space.
2066   AllocateInNewSpace(scratch1,
2067                      result,
2068                      scratch2,
2069                      scratch3,
2070                      gc_required,
2071                      TAG_OBJECT);
2072 
2073   // Set the map, length and hash field.
2074   InitializeNewString(result,
2075                       length,
2076                       Heap::kStringMapRootIndex,
2077                       scratch1,
2078                       scratch2);
2079 }
2080 
2081 
2082 void MacroAssembler::AllocateAsciiString(Register result,
2083                                          Register length,
2084                                          Register scratch1,
2085                                          Register scratch2,
2086                                          Register scratch3,
2087                                          Label* gc_required) {
2088   // Calculate the number of bytes needed for the characters in the string
2089   // while observing object alignment.
2090   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2091   ASSERT(kCharSize == 1);
2092   addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
2093   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
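  // scratch1 now holds the allocation size in bytes:
  //   (length + SeqAsciiString::kHeaderSize + kObjectAlignmentMask)
  //       & ~kObjectAlignmentMask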
2094 
2095   // Allocate ASCII string in new space.
2096   AllocateInNewSpace(scratch1,
2097                      result,
2098                      scratch2,
2099                      scratch3,
2100                      gc_required,
2101                      TAG_OBJECT);
2102 
2103   // Set the map, length and hash field.
2104   InitializeNewString(result,
2105                       length,
2106                       Heap::kAsciiStringMapRootIndex,
2107                       scratch1,
2108                       scratch2);
2109 }
2110 
2111 
2112 void MacroAssembler::AllocateTwoByteConsString(Register result,
2113                                                Register length,
2114                                                Register scratch1,
2115                                                Register scratch2,
2116                                                Label* gc_required) {
2117   AllocateInNewSpace(ConsString::kSize,
2118                      result,
2119                      scratch1,
2120                      scratch2,
2121                      gc_required,
2122                      TAG_OBJECT);
2123   InitializeNewString(result,
2124                       length,
2125                       Heap::kConsStringMapRootIndex,
2126                       scratch1,
2127                       scratch2);
2128 }
2129 
2130 
2131 void MacroAssembler::AllocateAsciiConsString(Register result,
2132                                              Register length,
2133                                              Register scratch1,
2134                                              Register scratch2,
2135                                              Label* gc_required) {
2136   AllocateInNewSpace(ConsString::kSize,
2137                      result,
2138                      scratch1,
2139                      scratch2,
2140                      gc_required,
2141                      TAG_OBJECT);
2142   InitializeNewString(result,
2143                       length,
2144                       Heap::kConsAsciiStringMapRootIndex,
2145                       scratch1,
2146                       scratch2);
2147 }
2148 
2149 
2150 // Allocates a heap number or jumps to the label if the young space is full and
2151 // a scavenge is needed.
2152 void MacroAssembler::AllocateHeapNumber(Register result,
2153                                         Register scratch1,
2154                                         Register scratch2,
2155                                         Register heap_number_map,
2156                                         Label* need_gc) {
2157   // Allocate an object in the heap for the heap number and tag it as a heap
2158   // object.
2159   AllocateInNewSpace(HeapNumber::kSize,
2160                      result,
2161                      scratch1,
2162                      scratch2,
2163                      need_gc,
2164                      TAG_OBJECT);
2165 
2166   // Store heap number map in the allocated object.
2167   AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2168   sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2169 }
2170 
2171 
2172 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2173                                                  FPURegister value,
2174                                                  Register scratch1,
2175                                                  Register scratch2,
2176                                                  Label* gc_required) {
2177   LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
2178   AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
2179   sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2180 }
2181 
2182 
2183 // Copies a fixed number of fields of heap objects from src to dst.
2184 void MacroAssembler::CopyFields(Register dst,
2185                                 Register src,
2186                                 RegList temps,
2187                                 int field_count) {
2188   ASSERT((temps & dst.bit()) == 0);
2189   ASSERT((temps & src.bit()) == 0);
2190   // Primitive implementation using only one temporary register.
2191 
2192   Register tmp = no_reg;
2193   // Find a temp register in temps list.
2194   for (int i = 0; i < kNumRegisters; i++) {
2195     if ((temps & (1 << i)) != 0) {
2196       tmp.code_ = i;
2197       break;
2198     }
2199   }
2200   ASSERT(!tmp.is(no_reg));
2201 
2202   for (int i = 0; i < field_count; i++) {
2203     lw(tmp, FieldMemOperand(src, i * kPointerSize));
2204     sw(tmp, FieldMemOperand(dst, i * kPointerSize));
2205   }
2206 }
2207 
2208 
2209 void MacroAssembler::CheckMap(Register obj,
2210                               Register scratch,
2211                               Handle<Map> map,
2212                               Label* fail,
2213                               bool is_heap_object) {
2214   if (!is_heap_object) {
2215     JumpIfSmi(obj, fail);
2216   }
2217   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2218   li(at, Operand(map));
2219   Branch(fail, ne, scratch, Operand(at));
2220 }
2221 
2222 
2223 void MacroAssembler::CheckMap(Register obj,
2224                               Register scratch,
2225                               Heap::RootListIndex index,
2226                               Label* fail,
2227                               bool is_heap_object) {
2228   if (!is_heap_object) {
2229     JumpIfSmi(obj, fail);
2230   }
2231   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2232   LoadRoot(at, index);
2233   Branch(fail, ne, scratch, Operand(at));
2234 }
2235 
2236 
2237 // -----------------------------------------------------------------------------
2238 // JavaScript invokes
2239 
2240 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2241                                     const ParameterCount& actual,
2242                                     Handle<Code> code_constant,
2243                                     Register code_reg,
2244                                     Label* done,
2245                                     InvokeFlag flag,
2246                                     PostCallGenerator* post_call_generator) {
2247   bool definitely_matches = false;
2248   Label regular_invoke;
2249 
2250   // Check whether the expected and actual argument counts match. If not,
2251   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
2252   //  a0: actual arguments count
2253   //  a1: function (passed through to callee)
2254   //  a2: expected arguments count
2255   //  a3: callee code entry
2256 
2257   // The code below is made a lot easier because the calling code already sets
2258   // up actual and expected registers according to the contract if values are
2259   // passed in registers.
2260   ASSERT(actual.is_immediate() || actual.reg().is(a0));
2261   ASSERT(expected.is_immediate() || expected.reg().is(a2));
2262   ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
2263 
2264   if (expected.is_immediate()) {
2265     ASSERT(actual.is_immediate());
2266     if (expected.immediate() == actual.immediate()) {
2267       definitely_matches = true;
2268     } else {
2269       li(a0, Operand(actual.immediate()));
2270       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2271       if (expected.immediate() == sentinel) {
2272         // Don't worry about adapting arguments for builtins that
2273         // don't want that done. Skip adaptation code by making it look
2274         // like we have a match between expected and actual number of
2275         // arguments.
2276         definitely_matches = true;
2277       } else {
2278         li(a2, Operand(expected.immediate()));
2279       }
2280     }
2281   } else {
2282     if (actual.is_immediate()) {
2283       Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
2284       li(a0, Operand(actual.immediate()));
2285     } else {
2286       Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
2287     }
2288   }
2289 
2290   if (!definitely_matches) {
2291     if (!code_constant.is_null()) {
2292       li(a3, Operand(code_constant));
2293       addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
2294     }
2295 
2296     Handle<Code> adaptor =
2297         isolate()->builtins()->ArgumentsAdaptorTrampoline();
2298     if (flag == CALL_FUNCTION) {
2299       Call(adaptor, RelocInfo::CODE_TARGET);
2300       if (post_call_generator != NULL) post_call_generator->Generate();
2301       jmp(done);
2302     } else {
2303       Jump(adaptor, RelocInfo::CODE_TARGET);
2304     }
2305     bind(&regular_invoke);
2306   }
2307 }
2308 
2309 
2310 void MacroAssembler::InvokeCode(Register code,
2311                                 const ParameterCount& expected,
2312                                 const ParameterCount& actual,
2313                                 InvokeFlag flag,
2314                                 PostCallGenerator* post_call_generator) {
2315   Label done;
2316 
2317   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2318                  post_call_generator);
2319   if (flag == CALL_FUNCTION) {
2320     Call(code);
2321   } else {
2322     ASSERT(flag == JUMP_FUNCTION);
2323     Jump(code);
2324   }
2325   // Continue here if InvokePrologue handled the invocation itself due to
2326   // mismatched parameter counts.
2327   bind(&done);
2328 }
2329 
2330 
2331 void MacroAssembler::InvokeCode(Handle<Code> code,
2332                                 const ParameterCount& expected,
2333                                 const ParameterCount& actual,
2334                                 RelocInfo::Mode rmode,
2335                                 InvokeFlag flag) {
2336   Label done;
2337 
2338   InvokePrologue(expected, actual, code, no_reg, &done, flag);
2339   if (flag == CALL_FUNCTION) {
2340     Call(code, rmode);
2341   } else {
2342     Jump(code, rmode);
2343   }
2344   // Continue here if InvokePrologue handled the invocation itself due to
2345   // mismatched parameter counts.
2346   bind(&done);
2347 }
2348 
2349 
2350 void MacroAssembler::InvokeFunction(Register function,
2351                                     const ParameterCount& actual,
2352                                     InvokeFlag flag,
2353                                     PostCallGenerator* post_call_generator) {
2354   // Contract with called JS functions requires that function is passed in a1.
2355   ASSERT(function.is(a1));
2356   Register expected_reg = a2;
2357   Register code_reg = a3;
2358 
2359   lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2360   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2361   lw(expected_reg,
2362       FieldMemOperand(code_reg,
2363                       SharedFunctionInfo::kFormalParameterCountOffset));
2364   sra(expected_reg, expected_reg, kSmiTagSize);
2365   lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2366 
2367   ParameterCount expected(expected_reg);
2368   InvokeCode(code_reg, expected, actual, flag, post_call_generator);
2369 }
2370 
2371 
2372 void MacroAssembler::InvokeFunction(JSFunction* function,
2373                                     const ParameterCount& actual,
2374                                     InvokeFlag flag) {
2375   ASSERT(function->is_compiled());
2376 
2377   // Get the function and set up the context.
2378   li(a1, Operand(Handle<JSFunction>(function)));
2379   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2380 
2381   // Invoke the cached code.
2382   Handle<Code> code(function->code());
2383   ParameterCount expected(function->shared()->formal_parameter_count());
2384   if (V8::UseCrankshaft()) {
2385     UNIMPLEMENTED_MIPS();
2386   } else {
2387     InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
2388   }
2389 }
2390 
2391 
2392 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
2393                                           Register map,
2394                                           Register scratch,
2395                                           Label* fail) {
2396   lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
2397   IsInstanceJSObjectType(map, scratch, fail);
2398 }
2399 
2400 
2401 void MacroAssembler::IsInstanceJSObjectType(Register map,
2402                                             Register scratch,
2403                                             Label* fail) {
2404   lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2405   Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
2406   Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
2407 }
2408 
2409 
2410 void MacroAssembler::IsObjectJSStringType(Register object,
2411                                           Register scratch,
2412                                           Label* fail) {
2413   ASSERT(kNotStringTag != 0);
2414 
2415   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2416   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2417   And(scratch, scratch, Operand(kIsNotStringMask));
2418   Branch(fail, ne, scratch, Operand(zero_reg));
2419 }
2420 
2421 
2422 // ---------------------------------------------------------------------------
2423 // Support functions.
2424 
2425 
2426 void MacroAssembler::TryGetFunctionPrototype(Register function,
2427                                              Register result,
2428                                              Register scratch,
2429                                              Label* miss) {
2430   // Check that the receiver isn't a smi.
2431   JumpIfSmi(function, miss);
2432 
2433   // Check that the function really is a function.  Load map into result reg.
2434   GetObjectType(function, result, scratch);
2435   Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
2436 
2437   // Make sure that the function has an instance prototype.
2438   Label non_instance;
2439   lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2440   And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2441   Branch(&non_instance, ne, scratch, Operand(zero_reg));
2442 
2443   // Get the prototype or initial map from the function.
2444   lw(result,
2445      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2446 
2447   // If the prototype or initial map is the hole, don't return it and
2448   // simply miss the cache instead. This will allow us to allocate a
2449   // prototype object on-demand in the runtime system.
2450   LoadRoot(t8, Heap::kTheHoleValueRootIndex);
2451   Branch(miss, eq, result, Operand(t8));
2452 
2453   // If the function does not have an initial map, we're done.
2454   Label done;
2455   GetObjectType(result, scratch, scratch);
2456   Branch(&done, ne, scratch, Operand(MAP_TYPE));
2457 
2458   // Get the prototype from the initial map.
2459   lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2460   jmp(&done);
2461 
2462   // Non-instance prototype: Fetch prototype from constructor field
2463   // in initial map.
2464   bind(&non_instance);
2465   lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2466 
2467   // All done.
2468   bind(&done);
2469 }
2470 
2471 
2472 void MacroAssembler::GetObjectType(Register object,
2473                                    Register map,
2474                                    Register type_reg) {
2475   lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2476   lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2477 }
2478 
2479 
2480 // -----------------------------------------------------------------------------
2481 // Runtime calls
2482 
2483 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
2484                               Register r1, const Operand& r2) {
2485   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
2486   Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
2487 }
2488 
2489 
2490 void MacroAssembler::TailCallStub(CodeStub* stub) {
2491   ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
2492   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
2493 }
2494 
2495 
2496 void MacroAssembler::IllegalOperation(int num_arguments) {
2497   if (num_arguments > 0) {
2498     addiu(sp, sp, num_arguments * kPointerSize);
2499   }
2500   LoadRoot(v0, Heap::kUndefinedValueRootIndex);
2501 }
2502 
2503 
2504 void MacroAssembler::IndexFromHash(Register hash,
2505                                    Register index) {
2506   // If the hash field contains an array index, pick it out. The assert checks
2507   // that the constants for the maximum number of digits for an array index
2508   // cached in the hash field and the number of bits reserved for it do not
2509   // conflict.
2510   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2511          (1 << String::kArrayIndexValueBits));
2512   // We want the smi-tagged index in the index register.  kArrayIndexValueMask
2513   // has zeros in the low kHashShift bits.
2514   STATIC_ASSERT(kSmiTag == 0);
2515   Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
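  // Ext extracted the kArrayIndexValueBits-wide index field starting at bit
  // kHashShift; the shift below turns that raw index into a smi.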
2516   sll(index, hash, kSmiTagSize);
2517 }
2518 
2519 
2520 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
2521                                                FPURegister result,
2522                                                Register scratch1,
2523                                                Register scratch2,
2524                                                Register heap_number_map,
2525                                                Label* not_number,
2526                                                ObjectToDoubleFlags flags) {
2527   Label done;
2528   if ((flags & OBJECT_NOT_SMI) == 0) {
2529     Label not_smi;
2530     JumpIfNotSmi(object, &not_smi);
2531     // Remove smi tag and convert to double.
2532     sra(scratch1, object, kSmiTagSize);
2533     mtc1(scratch1, result);
2534     cvt_d_w(result, result);
2535     Branch(&done);
2536     bind(&not_smi);
2537   }
2538   // Check for heap number and load double value from it.
2539   lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
2540   Branch(not_number, ne, scratch1, Operand(heap_number_map));
2541 
2542   if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
2543     // If exponent is all ones the number is either a NaN or +/-Infinity.
2544     Register exponent = scratch1;
2545     Register mask_reg = scratch2;
2546     lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
2547     li(mask_reg, HeapNumber::kExponentMask);
2548 
2549     And(exponent, exponent, mask_reg);
2550     Branch(not_number, eq, exponent, Operand(mask_reg));
2551   }
2552   ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
2553   bind(&done);
2554 }
2555 
2556 
2557 
2558 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
2559                                             FPURegister value,
2560                                             Register scratch1) {
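  // Untag the smi with an arithmetic shift, move the integer to the FPU, and
  // convert it from a 32-bit word to a double.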
2561   sra(scratch1, smi, kSmiTagSize);
2562   mtc1(scratch1, value);
2563   cvt_d_w(value, value);
2564 }
2565 
2566 
2567 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2568                                  int num_arguments) {
2569   // All parameters are on the stack. v0 has the return value after call.
2570 
2571   // If the expected number of arguments of the runtime function is
2572   // constant, we check that the actual number of arguments match the
2573   // expectation.
2574   if (f->nargs >= 0 && f->nargs != num_arguments) {
2575     IllegalOperation(num_arguments);
2576     return;
2577   }
2578 
2579   // TODO(1236192): Most runtime routines don't need the number of
2580   // arguments passed in because it is constant. At some point we
2581   // should remove this need and make the runtime routine entry code
2582   // smarter.
2583   li(a0, num_arguments);
2584   li(a1, Operand(ExternalReference(f, isolate())));
2585   CEntryStub stub(1);
2586   CallStub(&stub);
2587 }
2588 
2589 
2590 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
2591   const Runtime::Function* function = Runtime::FunctionForId(id);
2592   li(a0, Operand(function->nargs));
2593   li(a1, Operand(ExternalReference(function, isolate())));
2594   CEntryStub stub(1);
2595   stub.SaveDoubles();
2596   CallStub(&stub);
2597 }
2598 
2599 
2600 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2601   CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2602 }
2603 
2604 
2605 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2606                                            int num_arguments) {
2607   li(a0, Operand(num_arguments));
2608   li(a1, Operand(ext));
2609 
2610   CEntryStub stub(1);
2611   CallStub(&stub);
2612 }
2613 
2614 
2615 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2616                                                int num_arguments,
2617                                                int result_size) {
2618   // TODO(1236192): Most runtime routines don't need the number of
2619   // arguments passed in because it is constant. At some point we
2620   // should remove this need and make the runtime routine entry code
2621   // smarter.
2622   li(a0, Operand(num_arguments));
2623   JumpToExternalReference(ext);
2624 }
2625 
2626 
2627 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2628                                      int num_arguments,
2629                                      int result_size) {
2630   TailCallExternalReference(ExternalReference(fid, isolate()),
2631                             num_arguments,
2632                             result_size);
2633 }
2634 
2635 
2636 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2637   li(a1, Operand(builtin));
2638   CEntryStub stub(1);
2639   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2640 }
2641 
2642 
2643 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2644                                    InvokeJSFlags flags,
2645                                    PostCallGenerator* post_call_generator) {
2646   GetBuiltinEntry(t9, id);
2647   if (flags == CALL_JS) {
2648     Call(t9);
2649     if (post_call_generator != NULL) post_call_generator->Generate();
2650   } else {
2651     ASSERT(flags == JUMP_JS);
2652     Jump(t9);
2653   }
2654 }
2655 
2656 
2657 void MacroAssembler::GetBuiltinFunction(Register target,
2658                                         Builtins::JavaScript id) {
2659   // Load the builtins object into target register.
2660   lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2661   lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2662   // Load the JavaScript builtin function from the builtins object.
2663   lw(target, FieldMemOperand(target,
2664                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2665 }
2666 
2667 
2668 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2669   ASSERT(!target.is(a1));
2670   GetBuiltinFunction(a1, id);
2671   // Load the code entry point from the builtins object.
2672   lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2673 }
2674 
2675 
2676 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2677                                 Register scratch1, Register scratch2) {
2678   if (FLAG_native_code_counters && counter->Enabled()) {
2679     li(scratch1, Operand(value));
2680     li(scratch2, Operand(ExternalReference(counter)));
2681     sw(scratch1, MemOperand(scratch2));
2682   }
2683 }
2684 
2685 
2686 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2687                                       Register scratch1, Register scratch2) {
2688   ASSERT(value > 0);
2689   if (FLAG_native_code_counters && counter->Enabled()) {
2690     li(scratch2, Operand(ExternalReference(counter)));
2691     lw(scratch1, MemOperand(scratch2));
2692     Addu(scratch1, scratch1, Operand(value));
2693     sw(scratch1, MemOperand(scratch2));
2694   }
2695 }
2696 
2697 
2698 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2699                                       Register scratch1, Register scratch2) {
2700   ASSERT(value > 0);
2701   if (FLAG_native_code_counters && counter->Enabled()) {
2702     li(scratch2, Operand(ExternalReference(counter)));
2703     lw(scratch1, MemOperand(scratch2));
2704     Subu(scratch1, scratch1, Operand(value));
2705     sw(scratch1, MemOperand(scratch2));
2706   }
2707 }
2708 
2709 
2710 // -----------------------------------------------------------------------------
2711 // Debugging
2712 
2713 void MacroAssembler::Assert(Condition cc, const char* msg,
2714                             Register rs, Operand rt) {
2715   if (FLAG_debug_code)
2716     Check(cc, msg, rs, rt);
2717 }
2718 
2719 
2720 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2721                                           Heap::RootListIndex index) {
2722   if (FLAG_debug_code) {
2723     LoadRoot(at, index);
2724     Check(eq, "Register did not match expected root", reg, Operand(at));
2725   }
2726 }
2727 
2728 
2729 void MacroAssembler::AssertFastElements(Register elements) {
2730   if (FLAG_debug_code) {
2731     ASSERT(!elements.is(at));
2732     Label ok;
2733     Push(elements);
2734     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2735     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2736     Branch(&ok, eq, elements, Operand(at));
2737     LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
2738     Branch(&ok, eq, elements, Operand(at));
2739     Abort("JSObject with fast elements map has slow elements");
2740     bind(&ok);
2741     Pop(elements);
2742   }
2743 }
2744 
2745 
2746 void MacroAssembler::Check(Condition cc, const char* msg,
2747                            Register rs, Operand rt) {
2748   Label L;
2749   Branch(&L, cc, rs, rt);
2750   Abort(msg);
2751   // will not return here
2752   bind(&L);
2753 }
2754 
2755 
2756 void MacroAssembler::Abort(const char* msg) {
2757   Label abort_start;
2758   bind(&abort_start);
2759   // We want to pass the msg string like a smi to avoid GC
2760   // problems; however, msg is not guaranteed to be aligned
2761   // properly. Instead, we pass an aligned pointer that is
2762   // a proper v8 smi, but also pass the alignment difference
2763   // from the real pointer as a smi.
2764   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2765   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2766   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
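  // The original msg pointer can be recovered on the runtime side as
  // p0 + (p1 - p0); both values pushed below look like smis to the GC.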
2767 #ifdef DEBUG
2768   if (msg != NULL) {
2769     RecordComment("Abort message: ");
2770     RecordComment(msg);
2771   }
2772 #endif
2773   // Disable stub call restrictions to always allow calls to abort.
2774   AllowStubCallsScope allow_scope(this, true);
2775 
2776   li(a0, Operand(p0));
2777   Push(a0);
2778   li(a0, Operand(Smi::FromInt(p1 - p0)));
2779   Push(a0);
2780   CallRuntime(Runtime::kAbort, 2);
2781   // will not return here
2782   if (is_trampoline_pool_blocked()) {
2783     // If the calling code cares about the exact number of
2784     // instructions generated, we insert padding here to keep the size
2785     // of the Abort macro constant.
2786     // Currently in debug mode with debug_code enabled the number of
2787     // generated instructions is 14, so we use this as a maximum value.
2788     static const int kExpectedAbortInstructions = 14;
2789     int abort_instructions = InstructionsGeneratedSince(&abort_start);
2790     ASSERT(abort_instructions <= kExpectedAbortInstructions);
2791     while (abort_instructions++ < kExpectedAbortInstructions) {
2792       nop();
2793     }
2794   }
2795 }
2796 
2797 
2798 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2799   if (context_chain_length > 0) {
2800     // Move up the chain of contexts to the context containing the slot.
2801     lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
2802     // Load the function context (which is the incoming, outer context).
2803     lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2804     for (int i = 1; i < context_chain_length; i++) {
2805       lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2806       lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2807     }
2808     // The context may be an intermediate context, not a function context.
2809     lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2810   } else {  // Slot is in the current function context.
2811     // The context may be an intermediate context, not a function context.
2812     lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2813   }
2814 }
2815 
2816 
2817 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2818   // Load the global or builtins object from the current context.
2819   lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2820   // Load the global context from the global or builtins object.
2821   lw(function, FieldMemOperand(function,
2822                                GlobalObject::kGlobalContextOffset));
2823   // Load the function from the global context.
2824   lw(function, MemOperand(function, Context::SlotOffset(index)));
2825 }
2826 
2827 
2828 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2829                                                   Register map,
2830                                                   Register scratch) {
2831   // Load the initial map. The global functions all have initial maps.
2832   lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2833   if (FLAG_debug_code) {
2834     Label ok, fail;
2835     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
2836     Branch(&ok);
2837     bind(&fail);
2838     Abort("Global functions must have initial map");
2839     bind(&ok);
2840   }
2841 }
2842 
2843 
2844 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2845   addiu(sp, sp, -5 * kPointerSize);
2846   li(t8, Operand(Smi::FromInt(type)));
2847   li(t9, Operand(CodeObject()));
2848   sw(ra, MemOperand(sp, 4 * kPointerSize));
2849   sw(fp, MemOperand(sp, 3 * kPointerSize));
2850   sw(cp, MemOperand(sp, 2 * kPointerSize));
2851   sw(t8, MemOperand(sp, 1 * kPointerSize));
2852   sw(t9, MemOperand(sp, 0 * kPointerSize));
2853   addiu(fp, sp, 3 * kPointerSize);
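  // The new frame now looks like this relative to fp:
  //   fp + 4: ra, fp + 0: caller's fp, fp - 4: cp,
  //   fp - 8: type marker (smi), fp - 12: code object.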
2854 }
2855 
2856 
2857 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2858   mov(sp, fp);
2859   lw(fp, MemOperand(sp, 0 * kPointerSize));
2860   lw(ra, MemOperand(sp, 1 * kPointerSize));
2861   addiu(sp, sp, 2 * kPointerSize);
2862 }
2863 
2864 
2865 void MacroAssembler::EnterExitFrame(Register hold_argc,
2866                                     Register hold_argv,
2867                                     Register hold_function,
2868                                     bool save_doubles) {
2869   // a0 is argc.
2870   sll(t8, a0, kPointerSizeLog2);
2871   addu(hold_argv, sp, t8);
2872   addiu(hold_argv, hold_argv, -kPointerSize);
2873 
2874   // Compute the callee's stack pointer before making changes and save it in
2875   // the t9 register so that it can be restored as the sp register on exit,
2876   // thereby popping the args.
2877   // t9 = sp + kPointerSize * #args
2878   addu(t9, sp, t8);
2879 
2880   // Compute the argv pointer and keep it in a callee-saved register.
2881   // This only seems to be needed for crankshaft and may cause problems
2882   // so it's disabled for now.
2883   // Subu(s6, t9, Operand(kPointerSize));
2884 
2885   // Align the stack at this point.
2886   AlignStack(0);
2887 
2888   // Save registers.
2889   addiu(sp, sp, -12);
2890   sw(t9, MemOperand(sp, 8));
2891   sw(ra, MemOperand(sp, 4));
2892   sw(fp, MemOperand(sp, 0));
2893   mov(fp, sp);  // Set up new frame pointer.
2894 
2895   li(t8, Operand(CodeObject()));
2896   Push(t8);  // Accessed from ExitFrame::code_slot.
2897 
2898   // Save the frame pointer and the context in top.
2899   li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
2900   sw(fp, MemOperand(t8));
2901   li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
2902   sw(cp, MemOperand(t8));
2903 
2904   // Set up argc and the builtin function in callee-saved registers.
2905   mov(hold_argc, a0);
2906   mov(hold_function, a1);
2907 
2908   // Optionally save all double registers.
2909   if (save_doubles) {
2910 #ifdef DEBUG
2911     int frame_alignment = ActivationFrameAlignment();
2912 #endif
2913     // The stack alignment code above made sp unaligned, so add space for one
2914     // more double register and use aligned addresses.
2915     ASSERT(kDoubleSize == frame_alignment);
2916     // Mark the frame as containing doubles by pushing a non-valid return
2917     // address, i.e. 0.
2918     ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
2919     push(zero_reg);  // Marker and alignment word.
2920     int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
2921     Subu(sp, sp, Operand(space));
2922     // Remember: we only need to save every 2nd double FPU value.
2923     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
2924       FPURegister reg = FPURegister::from_code(i);
2925       sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
2926     }
2927     // Note that f0 will be accessible at fp - 2*kPointerSize -
2928     // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
2929     // alignment word were pushed after the fp.
2930   }
2931 }
2932 
2933 
2934 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
2935   // Optionally restore all double registers.
2936   if (save_doubles) {
2937     // TODO(regis): Use vldrm instruction.
2938     // Remember: we only need to restore every 2nd double FPU value.
2939     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
2940       FPURegister reg = FPURegister::from_code(i);
2941       // Registers f30-f31 are just below the marker.
2942       const int offset = ExitFrameConstants::kMarkerOffset;
2943       ldc1(reg, MemOperand(fp,
2944           (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
2945     }
2946   }
2947 
2948   // Clear top frame.
2949   li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
2950   sw(zero_reg, MemOperand(t8));
2951 
2952   // Restore current context from top and clear it in debug mode.
2953   li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
2954   lw(cp, MemOperand(t8));
2955 #ifdef DEBUG
2956   sw(a3, MemOperand(t8));
2957 #endif
2958 
2959   // Pop the arguments, restore registers, and return.
2960   mov(sp, fp);  // Respect ABI stack constraint.
2961   lw(fp, MemOperand(sp, 0));
2962   lw(ra, MemOperand(sp, 4));
2963   lw(sp, MemOperand(sp, 8));
2964   jr(ra);
2965   nop();  // Branch delay slot nop.
2966 }
2967 
2968 
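// InitializeNewString fills in the header of a freshly allocated string:
// the smi-tagged length, the map selected by map_index, and the empty hash
// field.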
2969 void MacroAssembler::InitializeNewString(Register string,
2970                                          Register length,
2971                                          Heap::RootListIndex map_index,
2972                                          Register scratch1,
2973                                          Register scratch2) {
2974   sll(scratch1, length, kSmiTagSize);
2975   LoadRoot(scratch2, map_index);
2976   sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
2977   li(scratch1, Operand(String::kEmptyHashField));
2978   sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
2979   sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
2980 }
2981 
2982 
2983 int MacroAssembler::ActivationFrameAlignment() {
2984 #if defined(V8_HOST_ARCH_MIPS)
2985   // Running on the real platform. Use the alignment as mandated by the local
2986   // environment.
2987   // Note: This will break if we ever start generating snapshots on one Mips
2988   // platform for another Mips platform with a different alignment.
2989   return OS::ActivationFrameAlignment();
2990 #else  // defined(V8_HOST_ARCH_MIPS)
2991   // If we are using the simulator then we should always align to the expected
2992   // alignment. As the simulator is used to generate snapshots we do not know
2993   // if the target platform will need alignment, so this is controlled from a
2994   // flag.
2995   return FLAG_sim_stack_alignment;
2996 #endif  // defined(V8_HOST_ARCH_MIPS)
2997 }
2998 
2999 
3000 void MacroAssembler::AlignStack(int offset) {
3001   // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
3002   //     and an offset of 1 aligns to 4 modulo 8 bytes.
3003 #if defined(V8_HOST_ARCH_MIPS)
3004   // Running on the real platform. Use the alignment as mandated by the local
3005   // environment.
3006   // Note: This will break if we ever start generating snapshots on one MIPS
3007   // platform for another MIPS platform with a different alignment.
3008   int activation_frame_alignment = OS::ActivationFrameAlignment();
3009 #else  // defined(V8_HOST_ARCH_MIPS)
3010   // If we are using the simulator then we should always align to the expected
3011   // alignment. As the simulator is used to generate snapshots we do not know
3012   // if the target platform will need alignment, so we will always align at
3013   // this point here.
3014   int activation_frame_alignment = 2 * kPointerSize;
3015 #endif  // defined(V8_HOST_ARCH_MIPS)
3016   if (activation_frame_alignment != kPointerSize) {
3017     // This code needs to be made more general if this assert doesn't hold.
3018     ASSERT(activation_frame_alignment == 2 * kPointerSize);
3019     if (offset == 0) {
3020       andi(t8, sp, activation_frame_alignment - 1);
3021       Push(zero_reg, eq, t8, zero_reg);
3022     } else {
3023       andi(t8, sp, activation_frame_alignment - 1);
3024       addiu(t8, t8, -4);
3025       Push(zero_reg, eq, t8, zero_reg);
3026     }
3027   }
3028 }
3029 
3030 
3031 
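// A power of two has exactly one bit set, so reg & (reg - 1) is zero exactly
// for powers of two; the first branch (scratch < 0) catches reg == 0, for
// which the subtraction wraps to -1.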
3032 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
3033     Register reg,
3034     Register scratch,
3035     Label* not_power_of_two_or_zero) {
3036   Subu(scratch, reg, Operand(1));
3037   Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
3038          scratch, Operand(zero_reg));
3039   and_(at, scratch, reg);  // In the delay slot.
3040   Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
3041 }
3042 
3043 
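// With kSmiTag == 0 a smi has a clear tag bit, so the OR of both values has
// a set tag bit iff at least one operand is not a smi.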
3044 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
3045                                       Register reg2,
3046                                       Label* on_not_both_smi) {
3047   STATIC_ASSERT(kSmiTag == 0);
3048   ASSERT_EQ(1, kSmiTagMask);
3049   or_(at, reg1, reg2);
3050   andi(at, at, kSmiTagMask);
3051   Branch(on_not_both_smi, ne, at, Operand(zero_reg));
3052 }
3053 
3054 
3055 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3056                                      Register reg2,
3057                                      Label* on_either_smi) {
3058   STATIC_ASSERT(kSmiTag == 0);
3059   ASSERT_EQ(1, kSmiTagMask);
3060   // The AND of the tag bits is 0 if either operand is a smi.
3061   and_(at, reg1, reg2);
3062   andi(at, at, kSmiTagMask);
3063   Branch(on_either_smi, eq, at, Operand(zero_reg));
3064 }
3065 
3066 
3067 void MacroAssembler::AbortIfSmi(Register object) {
3068   STATIC_ASSERT(kSmiTag == 0);
3069   andi(at, object, kSmiTagMask);
3070   Assert(ne, "Operand is a smi", at, Operand(zero_reg));
3071 }
3072 
3073 
3074 void MacroAssembler::AbortIfNotSmi(Register object) {
3075   STATIC_ASSERT(kSmiTag == 0);
3076   andi(at, object, kSmiTagMask);
3077   Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
3078 }
3079 
3080 
3081 void MacroAssembler::AbortIfNotRootValue(Register src,
3082                                          Heap::RootListIndex root_value_index,
3083                                          const char* message) {
3084   ASSERT(!src.is(at));
3085   LoadRoot(at, root_value_index);
3086   Assert(eq, message, src, Operand(at));
3087 }
3088 
3089 
3090 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3091                                          Register heap_number_map,
3092                                          Register scratch,
3093                                          Label* on_not_heap_number) {
3094   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3095   AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3096   Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
3097 }
3098 
3099 
3100 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3101     Register first,
3102     Register second,
3103     Register scratch1,
3104     Register scratch2,
3105     Label* failure) {
3106   // Test that both first and second are sequential ASCII strings.
3107   // Assume that they are non-smis.
3108   lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3109   lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3110   lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3111   lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3112 
3113   JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3114                                                scratch2,
3115                                                scratch1,
3116                                                scratch2,
3117                                                failure);
3118 }
3119 
3120 
3121 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3122                                                          Register second,
3123                                                          Register scratch1,
3124                                                          Register scratch2,
3125                                                          Label* failure) {
3126   // Check that neither is a smi.
3127   STATIC_ASSERT(kSmiTag == 0);
3128   And(scratch1, first, Operand(second));
3129   And(scratch1, scratch1, Operand(kSmiTagMask));
3130   Branch(failure, eq, scratch1, Operand(zero_reg));
3131   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3132                                              second,
3133                                              scratch1,
3134                                              scratch2,
3135                                              failure);
3136 }
3137 
3138 
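// The mask below keeps only the string, encoding and representation bits of
// an instance type, so a value equals ASCII_STRING_TYPE under the mask
// exactly when it is a sequential ASCII string.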
3139 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3140     Register first,
3141     Register second,
3142     Register scratch1,
3143     Register scratch2,
3144     Label* failure) {
3145   int kFlatAsciiStringMask =
3146       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3147   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3148   ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
3149   andi(scratch1, first, kFlatAsciiStringMask);
3150   Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
3151   andi(scratch2, second, kFlatAsciiStringMask);
3152   Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
3153 }
3154 
3155 
3156 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3157                                                             Register scratch,
3158                                                             Label* failure) {
3159   int kFlatAsciiStringMask =
3160       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3161   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3162   And(scratch, type, Operand(kFlatAsciiStringMask));
3163   Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
3164 }
3165 
3166 
3167 static const int kRegisterPassedArguments = 4;
3168 
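// PrepareCallCFunction and CallCFunction cooperate: the former reserves the
// mips argument slots plus any stack-passed arguments and aligns sp, the
// latter performs the call and undoes the stack adjustment. A typical
// sequence (the external reference and argument values are placeholders):
//   PrepareCallCFunction(2, t0);
//   li(a0, Operand(first_arg));
//   li(a1, Operand(second_arg));
//   CallCFunction(ExternalReference::some_c_function(isolate()), 2);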
3169 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
3170   int frame_alignment = ActivationFrameAlignment();
3171 
3172   // Reserve space for the Isolate address, which is always passed last.
3173   num_arguments += 1;
3174 
3175   // Up to four simple arguments are passed in registers a0..a3.
3176   // Those four arguments must have reserved argument slots on the stack for
3177   // mips, even though those argument slots are not normally used.
3178   // Remaining arguments are pushed on the stack, above (higher address than)
3179   // the argument slots.
3180   ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
3181   int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
3182                                  0 : num_arguments - kRegisterPassedArguments) +
3183                                (StandardFrameConstants::kCArgsSlotsSize /
3184                                kPointerSize);
3185   if (frame_alignment > kPointerSize) {
3186     // Make stack end at alignment and make room for num_arguments - 4 words
3187     // and the original value of sp.
3188     mov(scratch, sp);
3189     Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3190     ASSERT(IsPowerOf2(frame_alignment));
3191     And(sp, sp, Operand(-frame_alignment));
3192     sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3193   } else {
3194     Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3195   }
3196 }
3197 
3198 
3199 void MacroAssembler::CallCFunction(ExternalReference function,
3200                                    int num_arguments) {
3201   CallCFunctionHelper(no_reg, function, at, num_arguments);
3202 }
3203 
3204 
3205 void MacroAssembler::CallCFunction(Register function,
3206                                    Register scratch,
3207                                    int num_arguments) {
3208   CallCFunctionHelper(function,
3209                       ExternalReference::the_hole_value_location(isolate()),
3210                       scratch,
3211                       num_arguments);
3212 }
3213 
3214 
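// CallCFunctionHelper appends the Isolate address as one extra trailing
// argument: in the next free argument register when fewer than four
// arguments are register-passed, otherwise in the stack slot just past the
// stack-passed arguments that PrepareCallCFunction reserved.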
3215 void MacroAssembler::CallCFunctionHelper(Register function,
3216                                          ExternalReference function_reference,
3217                                          Register scratch,
3218                                          int num_arguments) {
3219   // Push Isolate address as the last argument.
3220   if (num_arguments < kRegisterPassedArguments) {
3221     Register arg_to_reg[] = {a0, a1, a2, a3};
3222     Register r = arg_to_reg[num_arguments];
3223     li(r, Operand(ExternalReference::isolate_address()));
3224   } else {
3225     int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
3226                                  (StandardFrameConstants::kCArgsSlotsSize /
3227                                   kPointerSize);
3228     // Push Isolate address on the stack after the arguments.
3229     li(scratch, Operand(ExternalReference::isolate_address()));
3230     sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3231   }
3232   num_arguments += 1;
3233 
3234   // Make sure that the stack is aligned before calling a C function unless
3235   // running in the simulator. The simulator has its own alignment check which
3236   // provides more information.
3237   // The argument slots are presumed to have been set up by
3238   // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
3239 
3240 #if defined(V8_HOST_ARCH_MIPS)
3241   if (emit_debug_code()) {
3242     int frame_alignment = OS::ActivationFrameAlignment();
3243     int frame_alignment_mask = frame_alignment - 1;
3244     if (frame_alignment > kPointerSize) {
3245       ASSERT(IsPowerOf2(frame_alignment));
3246       Label alignment_as_expected;
3247       And(at, sp, Operand(frame_alignment_mask));
3248       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
3249       // Don't use Check here, as it will call Runtime_Abort possibly
3250       // re-entering here.
3251       stop("Unexpected alignment in CallCFunction");
3252       bind(&alignment_as_expected);
3253     }
3254   }
3255 #endif  // V8_HOST_ARCH_MIPS
3256 
3257   // Just call directly. The function called cannot cause a GC, or
3258   // allow preemption, so the return address in register ra stays correct.
3259   // If no target register was given, load the call target from the
3260   // external reference. The MIPS ABI requires the call to go through t9.
3261   if (function.is(no_reg)) {
3262     li(t9, Operand(function_reference));
3263     function = t9;
3264   } else if (!function.is(t9)) {
3265     mov(t9, function);
3266     function = t9;
3267   }
3268
3269 
3270   Call(function);
3271 
3272   ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
3273   int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
3274                                 0 : num_arguments - kRegisterPassedArguments) +
3275                                (StandardFrameConstants::kCArgsSlotsSize /
3276                                kPointerSize);
3277 
3278   if (OS::ActivationFrameAlignment() > kPointerSize) {
3279     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3280   } else {
3281     Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3282   }
3283 }
3284 
3285 
3286 #undef BRANCH_ARGS_CHECK
3287 
3288 
3289 #ifdef ENABLE_DEBUGGER_SUPPORT
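// CodePatcher overwrites a fixed number of existing instructions in place;
// its destructor flushes the instruction cache for the patched range.
// A minimal usage sketch (the address is hypothetical):
//   CodePatcher patcher(address, 1);
//   patcher.masm()->nop();  // Replace one instruction at 'address'.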
3290 CodePatcher::CodePatcher(byte* address, int instructions)
3291     : address_(address),
3292       instructions_(instructions),
3293       size_(instructions * Assembler::kInstrSize),
3294       masm_(address, size_ + Assembler::kGap) {
3295   // Create a new macro assembler pointing to the address of the code to patch.
3296   // The size is adjusted with kGap in order for the assembler to generate size
3297   // bytes of instructions without failing with buffer size constraints.
3298   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3299 }
3300 
3301 
3302 CodePatcher::~CodePatcher() {
3303   // Indicate that code has changed.
3304   CPU::FlushICache(address_, size_);
3305 
3306   // Check that the code was patched as expected.
3307   ASSERT(masm_.pc_ == address_ + size_);
3308   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3309 }
3310 
3311 
3312 void CodePatcher::Emit(Instr x) {
3313   masm()->emit(x);
3314 }
3315 
3316 
3317 void CodePatcher::Emit(Address addr) {
3318   masm()->emit(reinterpret_cast<Instr>(addr));
3319 }
3320 
3321 
3322 #endif  // ENABLE_DEBUGGER_SUPPORT
3323 
3324 
3325 } }  // namespace v8::internal
3326 
3327 #endif  // V8_TARGET_ARCH_MIPS
3328