// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/mips/assembler-mips-inl.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


const char* DoubleRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0",
    "f2",
    "f4",
    "f6",
    "f8",
    "f10",
    "f12",
    "f14",
    "f16",
    "f18",
    "f20",
    "f22",
    "f24",
    "f26"
  };
  return names[index];
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referenced by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
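
// Illustrative note (not in the original source): the unit/multiplier form
// computes offset_ = unit * multiplier + offset_addend, so, assuming
// kPointerSize == 4 and a zero offset_addend,
//   MemOperand(sp, kPointerSize, 3)
// addresses sp + 12, i.e. the third pointer-sized stack slot.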


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
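
// Illustrative encodings (not part of the original source), assuming
// kPointerSize == 4 and the standard MIPS field layout:
//   kPushInstruction == addiu sp, sp, -4 == 0x27bdfffc
//   kPopInstruction  == addiu sp, sp, 4  == 0x27bd0004
// The *RegPattern constants leave the rt field zero; IsPush()/IsPop() below
// mask rt out of the probed instruction, so any register stored to or loaded
// from offset 0 of sp matches.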


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // There is no known advantage to aligning branch/call targets to more
  // than a single instruction.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by an offset of -1 stored in the instruction,
// which is an otherwise illegal value (a branch offset of -1 is an infinite
// loop). The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the -4
// value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
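
// Worked example (illustrative): a branch whose 16-bit offset field holds -1
// (0xffff) decodes in target_at() below as
//   imm18 = ((0xffff << 16) >> 14) == -4 == kEndOfChain,
// i.e. the otherwise-illegal word offset -1 maps to the byte offset -4 that
// terminates a label's link chain.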


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
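
// Illustrative encodings (not from the original source):
//   plain nop          == sll zero_reg, zero_reg, 0 == 0x00000000
//   type-1 marker nop  == sll zero_reg, at, 1       == 0x00010040
// (rt == at(1) << kRtShift(16) and sa == 1 << kSaShift(6); the SPECIAL
// opcode and SLL function fields are both zero).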


int32_t Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
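
// Example (illustrative): an imm16 field of 0x0008 yields a branch offset of
// 0x20 (32 bytes, 8 instructions), while 0xfff8 sign-extends to -8 words and
// yields -32 bytes.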


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  ASSERT(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  ASSERT(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      ASSERT(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int32_t delta = instr_address - imm28;
      ASSERT(pos > delta);
      return pos - delta;
    }
  }
}
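
// Sketch of the lui/ori case above (illustrative): the pair at position pos
// holds the absolute address imm of the previous link in the chain, so with
//   delta = (buffer_ + pos) - imm
// the returned position pos - delta equals imm - buffer_, i.e. the stored
// address converted back into a buffer-relative offset.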


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    ASSERT((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    ASSERT(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the 16 bits of the MIPS immediate-offset
// instruction field.  There is no guarantee that the relocated location can
// be similarly encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  ASSERT((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));

  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}
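
// Illustrative range check (mirrors the DEBUG code above): j/jal can only
// reach targets within the 256 MB region of the delay-slot pc, since the
// upper 32 - (kImm26Bits + kImmFieldShift) == 4 address bits are inherited
// from pc. E.g. a delay-slot pc of 0x40001234 reaches 0x4ffffffc but not
// 0x50000000.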


void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (rs.is(ra)) {
    positions_recorder()->WriteRecordedPositions();
  }
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
      j(target);
  } else {
      jr(t9);
  }
}


void Assembler::jal_or_jalr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
      jal(target);
  } else {
      jalr(t9);
  }
}


// -------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
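
// Example expansion (illustrative): lw(t0, MemOperand(s0, 0x12345)) has an
// offset that does not fit in 16 bits, so lw() below emits
//   lui  at, 0x0001
//   ori  at, at, 0x2345
//   addu at, at, s0
//   lw   t0, 0(at)
// The offset is rebuilt with lui/ori, so it is treated as a full 32-bit
// value rather than a sign-extended 16-bit one.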


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
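
// Typical usage (illustrative): lui pairs with ori to build a full 32-bit
// constant, e.g. loading 0x12345678 into t0:
//   lui(t0, 0x1234);       // t0 = 0x12340000
//   ori(t0, t0, 0x5678);   // t0 = 0x12345678
// This is the lui/ori pattern that target_at()/target_at_put() above patch
// when a full address is embedded in the instruction stream.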


// -------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  ASSERT((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  ASSERT((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1528 
1529 
1530 // Move from HI/LO register.
1531 
mfhi(Register rd)1532 void Assembler::mfhi(Register rd) {
1533   GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1534 }
1535 
1536 
mflo(Register rd)1537 void Assembler::mflo(Register rd) {
1538   GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1539 }
1540 
1541 
1542 // Set on less than instructions.
slt(Register rd,Register rs,Register rt)1543 void Assembler::slt(Register rd, Register rs, Register rt) {
1544   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1545 }
1546 
1547 
sltu(Register rd,Register rs,Register rt)1548 void Assembler::sltu(Register rd, Register rs, Register rt) {
1549   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1550 }
1551 
1552 
slti(Register rt,Register rs,int32_t j)1553 void Assembler::slti(Register rt, Register rs, int32_t j) {
1554   GenInstrImmediate(SLTI, rs, rt, j);
1555 }
1556 
1557 
sltiu(Register rt,Register rs,int32_t j)1558 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1559   GenInstrImmediate(SLTIU, rs, rt, j);
1560 }
1561 
1562 
1563 // Conditional move.
movz(Register rd,Register rs,Register rt)1564 void Assembler::movz(Register rd, Register rs, Register rt) {
1565   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1566 }
1567 
1568 
movn(Register rd,Register rs,Register rt)1569 void Assembler::movn(Register rd, Register rs, Register rt) {
1570   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1571 }
1572 
1573 
movt(Register rd,Register rs,uint16_t cc)1574 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
1575   Register rt;
1576   rt.code_ = (cc & 0x0007) << 2 | 1;
1577   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1578 }
1579 
1580 
movf(Register rd,Register rs,uint16_t cc)1581 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1582   Register rt;
1583   rt.code_ = (cc & 0x0007) << 2 | 0;
1584   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1585 }
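
// Note on movt/movf: MOVCI has no true 'rt' operand; the 5-bit rt field
// packs the FPU condition code and the move sense instead, with cc in
// bits 4..2 and the tf bit in bit 0 (1 = move if true, 0 = move if
// false). Illustration: movt(v0, v1, 2) builds rt.code_ = (2 << 2) | 1 = 9.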


// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
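
// Worked example: ins_(a0, a1, 4, 8) inserts the low 8 bits of a1 into
// bits 11..4 of a0; the encoding carries msb = pos + size - 1 = 11 in the
// rd slot and lsb = pos = 4 in the shamt slot, per the MIPS32r2 INS format.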


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


void Assembler::pref(int32_t hint, const MemOperand& rs) {
  ASSERT(kArchVariant != kLoongson);
  ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}


// --------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit
  // load into two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
      Register::kMantissaOffset);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
      Register::kExponentOffset);
}
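
// Note: this split assumes fd names an even doubleword register, so that
// fd.code() + 1 is its odd 32-bit partner, and that kMantissaOffset and
// kExponentOffset select the low and high word of the double for the
// target endianness (0 and 4 on little-endian MIPS, swapped on big-endian).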


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit
  // store into two 32-bit stores.
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
      Register::kMantissaOffset);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
      Register::kExponentOffset);
}


void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}


void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}
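
// Worked example: for d = 1.0 the IEEE-754 bit pattern is
// 0x3FF0000000000000, so *lo = 0x00000000 and *hi = 0x3FF00000. The
// memcpy avoids the strict-aliasing violation that a pointer cast such
// as *reinterpret_cast<uint64_t*>(&d) could trigger.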


// Arithmetic.

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// Conversions.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}
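
// Encoding note: C.cond.fmt places the condition code in bits 10..8 and
// sets bits 5..4 (the "3 << 4" term, marking the compare group); cond
// itself occupies bits 3..0. For example, c(OLT, D, f2, f4, 0) emits a
// double compare whose result lands in FCSR condition bit 0.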


void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}
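
// Note: fcmp only supports comparison against 0.0 (hence the ASSERT); it
// materializes the zero by moving zero_reg into f14 and converting, so
// f14 is clobbered as a scratch register and FCSR condition bit 0
// receives the result.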


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
  Instr instr = instr_at(pc);
  ASSERT(IsJ(instr) || IsLui(instr));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
    return 2;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
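
// Illustration: if a lui/ori pair encodes the address 0x00401000 and the
// buffer moved by pc_delta = 0x20, the new value 0x00401020 is split back
// into lui imm16 = 0x0040 and ori imm16 = 0x1020. For a j/jal, only the
// 26-bit field (the 28-bit region offset >> 2) is rewritten, so the
// target must remain within the same 256 MB region.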


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  ASSERT(!overflow());
}
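
// Growth policy illustration: successive sizes run 4 KB, 8 KB, ...,
// 512 KB, 1 MB, then 2 MB, 3 MB, ... (doubling up to 1 MB, linear
// afterwards). Instructions grow upward from the buffer's start while
// relocation info grows downward from its end, which is why the two
// MemMove calls above use different deltas.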


void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump over the pool (2 instructions), then we emit the
    // trampoline pool itself; see the layout sketch after this function.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels is zero at this point, so we
    // can move the next buffer check out as far as possible.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
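
// Resulting pool layout (one slot per label unbound at emission time):
//     b after_pool       <- jump over the pool
//     nop                <- delay slot
//   pool_start:
//     lui at, hi16       \
//     ori at, at, lo16    | one slot; the lui/ori pair initially targets
//     jr at               | after_pool and is patched to the label's real
//     nop                / address once the label is bound
//   after_pool:
// Out-of-range branches are redirected to a slot, which then forwards
// them to the final target.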


Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret 2 instructions generated by li: lui/ori.
  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
    // Assemble the 32 bit value.
    return reinterpret_cast<Address>(
        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
  }

  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
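
// Worked example: the pair
//     lui at, 0x1234
//     ori at, at, 0x5678
// yields (0x1234 << 16) | 0x5678 = 0x12345678 as the target address.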


// MIPS and ia32 use opposite encodings for qNaN and sNaN, such that an
// ia32 qNaN is a MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running
// from a heap snapshot generated on ia32, the resulting MIPS sNaN must be
// quieted. OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(OS::nan_value());
}
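
// Background: in the legacy MIPS NaN encoding the most-significant
// mantissa bit marks a signalling NaN, whereas ia32 uses that same bit to
// mark a quiet NaN, which is why an identical bit pattern flips meaning
// between the two architectures.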


// On MIPS, a target address is stored in a lui/ori instruction pair, each
// of which loads 16 bits of the 32-bit address into a register.
// Patching the address must replace both instructions and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check that we have the result of a li macro-instruction (an instr pair).
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to ensure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // The following code is an optimization for the common case of Call()
  // or Jump(), which loads to a register and jumps through it:
  //     li(t9, address); jalr(t9)    (or jr(t9)).
  // If the destination address is in the same 256 MB page as the call, it
  // is faster to do a direct jal, or j, rather than jump through a register,
  // since that lets the cpu pipeline prefetch the target address. However,
  // each time the address above is patched, we have to patch the direct
  // jal/j instruction as well, and possibly revert to jalr/jr if we now
  // cross a 256 MB page boundary. Note that with the jal/j instructions we
  // do not need to load the register, but that code is left in, since it
  // makes it easy to revert this process. A further optimization could try
  // replacing the li sequence with nops.
  // This optimization can only be applied if the rt-code from instr2 is the
  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which
  // is the MIPS return. Occasionally this lands after an li().

  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
  uint32_t target_field =
      static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
  // This is a workaround for the 24k core E156 bug (it affects some 34k
  // cores as well). Since the excluded space is only 64 KB out of 256 MB
  // (0.02%), we simply apply the workaround on all cores, so we don't have
  // to identify the core.
  if (in_range) {
    // The 24k core E156 bug has some very specific requirements; we only
    // check the simplest one: whether the address of the delay slot
    // instruction is in the first or last 32 KB of the 256 MB segment.
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
      in_range = false;
  }
#endif
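
  // Mask arithmetic: (256 * MB) - 1 = 0x0FFFFFFF and (32 * KB) - 1 =
  // 0x00007FFF, so segment_mask = 0x0FFF8000. ipc_segment_addr is 0 in
  // the first 32 KB of a 256 MB segment and 0x0FFF8000 in its last 32 KB,
  // the two regions the workaround must exclude.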

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J, skip returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
      *(p+2) = JAL | target_field;
    } else {
      // We are patching a JAL, but the target is now out of range; revert
      // to JALR. The JALR 'rs' reg is the 'rt' reg specified in the ORI
      // instruction.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
      *(p+2) = J | target_field;
    } else {
      // We are patching a J, but the target is now out of range; revert
      // to JR. The JR 'rs' reg is the 'rt' reg specified in the ORI
      // instruction (instr2).
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      *(p+2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
  }
}


void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 2 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p+2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    // Flush the patched word itself: *(p+2) sits at pc + 2 * kInstrSize.
    CPU::FlushICache(pc + 2 * kInstrSize, sizeof(Address));
  }
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
  return;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS