// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/codegen/mips/assembler-mips.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}
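
// Example (added for illustration; not part of the original file): a cross
// build can pass -DCAN_USE_FPU_INSTRUCTIONS to the snapshot toolchain so
// that mksnapshot emits FPU code even when the host compiler does not
// define __mips_hard_float.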

void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#else
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}

void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}

int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
      0,   // zero_reg
      1,   // at
      2,   // v0
      3,   // v1
      4,   // a0
      5,   // a1
      6,   // a2
      7,   // a3
      8,   // t0
      9,   // t1
      10,  // t2
      11,  // t3
      12,  // t4
      13,  // t5
      14,  // t6
      15,  // t7
      16,  // s0
      17,  // s1
      18,  // s2
      19,  // s3
      20,  // s4
      21,  // s5
      22,  // s6
      23,  // s7
      24,  // t8
      25,  // t9
      26,  // k0
      27,  // k1
      28,  // gp
      29,  // sp
      30,  // fp
      31,  // ra
  };
  return kNumbers[reg.code()];
}

Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
      zero_reg, at, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, t7,
      s0,       s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra};
  return kRegisters[num];
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
    RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction pair, and
  // that is always the case inside code objects.
  return true;
}

bool RelocInfo::IsInConstantPool() { return false; }

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}

MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}

MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        break;
      case HeapObjectRequest::kStringConstant:
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
    }
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
  }
}
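
// Illustration (added; not part of the original file): each request records
// the pc offset of a lui/ori pair that was emitted with a placeholder value;
// once the heap object exists, set_target_value_at() rewrites that pair with
// the object's real address.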

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SW | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LW | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
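
// Sanity sketch (added; not part of the original file): ADDIU keeps only the
// low 16 bits of its immediate, so the pre-decrement -kPointerSize (-4 on
// 32-bit MIPS) in kPushInstruction encodes as 0xFFFC.
static_assert((-4 & 0xFFFF) == 0xFFFC,
              "push pre-decrement immediate encodes as 0xFFFC");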

Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_(at.bit()) {
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
                           ? kMaxInt
                           : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to the
  // (larger) kCodeAlignment anyways.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  EmitForbiddenSlotInstruction();

  int code_comments_size = WriteCodeComments();

  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  static constexpr int kConstantPoolSize = 0;
  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->GetCodeOffset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}

void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() {
  // No known advantage to aligning branch/call targets to more than a
  // single instruction.
  Align(4);
}

Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}

Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}

Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}

uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; }

uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}

uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; }

uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}

uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }

uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}

uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; }

uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }

uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}

uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}

uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; }

uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; }

bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}

bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}

bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}

bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}

bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}

bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by an offset field value of -1, which is an
// otherwise illegal value (a branch to -1 is an infinite loop). The
// instruction's 16-bit offset field addresses 32-bit words, but in code it
// is converted to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
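
// Worked example (added; not part of the original file): a terminating
// 16-bit offset field of -1 (0xFFFF) denotes -1 words, i.e. -4 bytes once
// scaled, which is exactly kEndOfChain.
static_assert(static_cast<int16_t>(0xFFFF) * 4 == kEndOfChain,
              "link chain terminator of -1 words equals -4 bytes");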

bool Assembler::IsMsaBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs_field = GetRsField(instr);
  if (opcode == COP1) {
    switch (rs_field) {
      case BZ_V:
      case BZ_B:
      case BZ_H:
      case BZ_W:
      case BZ_D:
      case BNZ_V:
      case BNZ_B:
      case BNZ_H:
      case BNZ_W:
      case BNZ_D:
        return true;
      default:
        return false;
    }
  } else {
    return false;
  }
}

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
    // All three variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}

bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}

bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}

bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}

bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}

bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }

bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; }

bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}

bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}

bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsJicOrJialc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  return (opcode == POP66 || opcode == POP76) && rs == 0;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
         (opcode == SPECIAL && rt_field == 0 &&
          ((function_field == JALR) ||
           (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}

bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; }

bool Assembler::IsJr(Instr instr) {
  if (!IsMipsArchVariant(kMips32r6)) {
    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
  } else {
    return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) == 0 &&
           GetFunctionField(instr) == JALR;
  }
}

bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) != 0 &&
         GetFunctionField(instr) == JALR;
}

bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate (ori).
  return opcode == ORI;
}

bool Assembler::IsAddu(Instr instr, Register rd, Register rs, Register rt) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t sa_field = GetSaField(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t rt_reg = static_cast<uint32_t>(rt.code());
  uint32_t function_field = GetFunction(instr);
  return opcode == SPECIAL && sa_field == 0 && function_field == ADDU &&
         rd_reg == rd_field && rs_reg == rs_field && rt_reg == rt_field;
}

bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is an OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}

bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && sa == type);

  return ret;
}
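
// Example (added; not part of the original file): nop() emits
// sll(zero_reg, zero_reg, 0). A marker nop of type 3 is sll(zero_reg, at, 3):
// still a no-op because the destination is $zero, but distinguishable, and
// using rt == at avoids colliding with the ssnop/ehb special encodings of
// sll (those use rt == zero_reg with sa 1 and 3).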

int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}

bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}

int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}

Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
                     (offset & kImm16Mask);

  return temp_instr;
}

bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}

Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}

Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}

static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (IsMipsArchVariant(kMips32r6)) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}

static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
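
// Worked example (added; not part of the original file): for a 16-bit branch
// field, bits becomes 32 - 16 = 16, so the field is shifted left 16 and
// arithmetically right 14. That sign-extends the 16-bit word offset and
// multiplies it by 4 in one step; e.g. a field of 0x8000 (-32768 words)
// yields imm == -131072 bytes.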

uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
  int16_t jic_offset = GetImmediate16(instr_jic);
  int16_t lui_offset = GetImmediate16(instr_lui);

  if (jic_offset < 0) {
    lui_offset += kImm16Mask;
  }
  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;

  return lui_offset_u | jic_offset_u;
}

// Use just lui and jic instructions. The lower part of the target address is
// inserted into the jic offset field. Since jic sign-extends its offset and
// then adds it to the register, the lui instruction loads the upper part of
// the target address adjusted for the upper part of the sign-extended offset
// (0xFFFF or 0x0000), so that the sum yields the target address.
void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
                                    int16_t* jic_offset) {
  *lui_offset = (address & kHiMask) >> kLuiShift;
  *jic_offset = address & kLoMask;

  if (*jic_offset < 0) {
    *lui_offset -= kImm16Mask;
  }
}
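
// Worked example (added; not part of the original file): for address
// 0x12348000 the low half 0x8000 is negative as an int16_t, so jic will
// sign-extend it to 0xFFFF8000. Subtracting kImm16Mask (0xFFFF) from the lui
// half is the same as adding 1 modulo 2^16, bumping it from 0x1234 to 0x1235;
// 0x12350000 + 0xFFFF8000 then wraps back to 0x12348000 as required.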

void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
                                            uint32_t* lui_offset,
                                            uint32_t* jic_offset) {
  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
  int16_t jic_offset16 = address & kLoMask;

  if (jic_offset16 < 0) {
    lui_offset16 -= kImm16Mask;
  }
  *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
  *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}

void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
                                     Address offset_lui, Instr instr_ori,
                                     Address offset_ori) {
  DCHECK(IsLui(instr_lui));
  DCHECK(IsOri(instr_ori));
  instr_at_put(static_cast<int>(pc + offset_lui),
               instr_lui | ((imm >> kLuiShift) & kImm16Mask));
  instr_at_put(static_cast<int>(pc + offset_ori),
               instr_ori | (imm & kImm16Mask));
}

void Assembler::PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr_lui,
                                     Address offset_lui, Instr instr_ori,
                                     Address offset_ori) {
  DCHECK(IsLui(instr_lui));
  DCHECK(IsOri(instr_ori));
  instr_at_put(pc + offset_lui, instr_lui | ((imm >> kLuiShift) & kImm16Mask));
  instr_at_put(pc + offset_ori, instr_ori | (imm & kImm16Mask));
}

int32_t Assembler::GetLuiOriImmediate(Instr instr_lui, Instr instr_ori) {
  DCHECK(IsLui(instr_lui));
  DCHECK(IsOri(instr_ori));
  int32_t imm;
  imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
  return imm;
}

int Assembler::target_at(int pos, bool is_internal) {
  Instr instr = instr_at(pos);
  if (is_internal) {
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t instr_address = reinterpret_cast<int32_t>(buffer_start_ + pos);
      int delta = static_cast<int>(instr_address - instr);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    int32_t imm32;
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else {
    DCHECK(IsLui(instr));
    if (IsNal(instr_at(pos + kInstrSize))) {
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      Instr instr1 = instr_at(pos + 0 * kInstrSize);
      Instr instr2 = instr_at(pos + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = GetLuiOriImmediate(instr1, instr2);
      }

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      } else {
        uint32_t instr_address = reinterpret_cast<int32_t>(buffer_start_ + pos);
        int32_t delta = instr_address - imm;
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  }
  return 0;
}

static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK_EQ(imm & 3, 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}

void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                              bool is_internal) {
  Instr instr = instr_at(pos);

  if (is_internal) {
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
    instr_at_put(pos, imm);
    return;
  }
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to regular branch with 16-bit
      // offset
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        // Case when branch delay slot is protected.
        instr_branch_delay = nopInstr;
      } else {
        // Case when branch delay slot is used.
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos + 0 * kInstrSize, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      PatchLuiOriImmediate(pos, imm, instr_lui, 2 * kInstrSize, instr_ori,
                           3 * kInstrSize);
    }
  } else {
    DCHECK(IsLui(instr));
    if (IsNal(instr_at(pos + kInstrSize))) {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to regular branch and link with 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after jalr from
        // TurboAssembler::BranchAndLinkLong.
        Instr instr_a = ADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;
        PatchLuiOriImmediate(pos, imm, instr_lui, 0 * kInstrSize, instr_ori,
                             2 * kInstrSize);
      }
    } else {
      Instr instr1 = instr_at(pos + 0 * kInstrSize);
      Instr instr2 = instr_at(pos + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
      DCHECK_EQ(imm & 3, 0);
      DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
        instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
      } else {
        PatchLuiOriImmediate(pos, imm, instr1, 0 * kInstrSize, instr2,
                             1 * kInstrSize);
      }
    }
  }
}

void Assembler::print(const Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l;
    l.link_to(L->pos());
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}

void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
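
// Illustration (added; not part of the original file): when a linked branch
// at fixup_pos is too far from pos to encode its offset directly, it is
// retargeted at a nearby trampoline slot, and the trampoline's unconditional
// jump carries control the rest of the way to pos.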

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}

void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}

bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}

bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
}

bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
}

int Assembler::BranchOffset(Instr instr) {
  // Pre-R6 branches, and most R6 branches, have a 16-bit offset.
  int bits = OffsetSize::kOffset16;

  if (IsMipsArchVariant(kMips32r6)) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
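
// Example (added; not part of the original file): with kOffset16 this
// returns (1 << 17) - 1 == 131071 bytes of reach (+2 because the word offset
// is scaled to bytes, -1 because the offset is signed); BC/BALC with
// kOffset26 reach (1 << 27) - 1 bytes.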

// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 Register rd, uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
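
// Example (added; not part of the original file): this emits the MIPS R-type
// layout [op:6 | rs:5 | rt:5 | rd:5 | sa:5 | funct:6], so addu(rd, rs, rt)
// comes through here as GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU).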

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 uint16_t msb, uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 FPURegister ft, FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPUControlRegister fs, SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}

// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
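
// Example (added; not part of the original file): this overload emits the
// MIPS I-type layout [op:6 | rs:5 | rt:5 | imm:16]; e.g. addiu(rt, rs, imm)
// is emitted as GenInstrImmediate(ADDIU, rs, rt, imm).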
1197 
GenInstrImmediate(Opcode opcode,Register base,Register rt,int32_t offset9,int bit6,SecondaryField func)1198 void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
1199                                   int32_t offset9, int bit6,
1200                                   SecondaryField func) {
1201   DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
1202          is_uint1(bit6));
1203   Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
1204                 ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
1205                 func;
1206   emit(instr);
1207 }
1208 
GenInstrImmediate(Opcode opcode,Register rs,SecondaryField SF,int32_t j,CompactBranchType is_compact_branch)1209 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
1210                                   int32_t j,
1211                                   CompactBranchType is_compact_branch) {
1212   DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
1213   Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
1214   emit(instr, is_compact_branch);
1215 }
1216 
GenInstrImmediate(Opcode opcode,Register rs,FPURegister ft,int32_t j,CompactBranchType is_compact_branch)1217 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
1218                                   int32_t j,
1219                                   CompactBranchType is_compact_branch) {
1220   DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
1221   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
1222                 (j & kImm16Mask);
1223   emit(instr, is_compact_branch);
1224 }
1225 
GenInstrImmediate(Opcode opcode,Register rs,int32_t offset21,CompactBranchType is_compact_branch)1226 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
1227                                   CompactBranchType is_compact_branch) {
1228   DCHECK(rs.is_valid() && (is_int21(offset21)));
1229   Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1230   emit(instr, is_compact_branch);
1231 }
1232 
GenInstrImmediate(Opcode opcode,Register rs,uint32_t offset21)1233 void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
1234                                   uint32_t offset21) {
1235   DCHECK(rs.is_valid() && (is_uint21(offset21)));
1236   Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1237   emit(instr);
1238 }
1239 
GenInstrImmediate(Opcode opcode,int32_t offset26,CompactBranchType is_compact_branch)1240 void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
1241                                   CompactBranchType is_compact_branch) {
1242   DCHECK(is_int26(offset26));
1243   Instr instr = opcode | (offset26 & kImm26Mask);
1244   emit(instr, is_compact_branch);
1245 }
1246 
GenInstrJump(Opcode opcode,uint32_t address)1247 void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
1248   BlockTrampolinePoolScope block_trampoline_pool(this);
1249   DCHECK(is_uint26(address));
1250   Instr instr = opcode | address;
1251   emit(instr);
1252   BlockTrampolinePoolFor(1);  // For associated delay slot.
1253 }
1254 
1255 // MSA instructions
GenInstrMsaI8(SecondaryField operation,uint32_t imm8,MSARegister ws,MSARegister wd)1256 void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
1257                               MSARegister ws, MSARegister wd) {
1258   DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
                               uint32_t m, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
  Instr instr = MSA | operation | df | (m << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
                               int32_t imm10, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wd.is_valid() && is_int10(imm10));
  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
                (wd.code() << kWdShift);
  emit(instr);
}

template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
                              RegType t, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
                               uint32_t n, SrcType src, DstType dst) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
  Instr instr = MSA | operation | df | (n << kWtShift) |
                (src.code() << kWsShift) | (dst.code() << kWdShift) |
                MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
                               MSARegister wt, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  DCHECK_LT(df, 2);
  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
                                Register rs, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
                (rs.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

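// Background on the trampoline pool (summary): conditional branches reach
// only +/-128KB (a signed 16-bit word offset). When unbound labels might end
// up farther away, the assembler reserves trampoline slots; a branch that
// turns out to be out of range is retargeted to a slot holding a jump to the
// real destination. get_trampoline_entry() hands out those slots; running
// out sets internal_trampoline_exception_, which is checked when the code is
// finalized.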
// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}

uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
  DCHECK_EQ(imm & 3, 0);

  return imm;
}

uint32_t Assembler::branch_long_offset(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  DCHECK(is_int32(static_cast<int64_t>(target_pos) -
                  static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
  int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
  DCHECK_EQ(offset & 3, 0);

  return offset;
}

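// Branch offsets are relative to the instruction following the branch (the
// delay slot), hence the kBranchPCOffset term below; 'pad' compensates for
// the nop that fills an r6 forbidden slot when the previous instruction was
// a compact branch. Worked example (illustrative): a label bound 12 bytes
// past the branch yields offset = 12 - 4 = 8; offsets are word-aligned and
// are scaled to words (offset >> 2) before encoding, which is why the DCHECK
// only requires the byte offset to fit in 'bits + 2' bits.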
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK_EQ(offset & 3, 0);

  return offset;
}

void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK_EQ(imm18 & 3, 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}

//------- Branch and jump instructions --------

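// A note on the recurring pattern below: classic MIPS branches have a delay
// slot, so each emitter blocks the trampoline pool for one extra instruction
// (BlockTrampolinePoolFor(1)) to keep pool entries from being inserted
// between a branch and its delay slot. The r6 compact branches (bc, balc,
// and the *c variants) have no delay slot and skip this.
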
void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); }

void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); }

void Assembler::bc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::balc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

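// bovc/bnvc (branch on integer overflow / no overflow) reuse the ADDI and
// DADDI opcode slots on r6. Within the shared opcode the register fields
// select the variant (rs == 0 gives beqzalc/bnezalc, rs < rt gives
// beqc/bnec, rs >= rt gives bovc/bnvc), so the operands, whose order does
// not matter for overflow detection, are swapped here to put the
// higher-numbered register in the rs field.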
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}

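// J and JAL can only reach targets inside the 256 MB region of the delay
// slot's pc: the instruction keeps the pc's upper four bits and supplies
// bits 27..2 of the target. The DEBUG block below verifies that region
// constraint and the target's word alignment.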
void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(J, (target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::jr(Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}

void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(JAL, (target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}

void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}

// -------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}

void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}

void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}

void Assembler::mul(Register rd, Register rs, Register rt) {
  if (!IsMipsArchVariant(kMips32r6)) {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  } else {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  }
}

void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}

void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}

void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}

void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}

void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}

void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}

void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}

void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}

void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}

void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}

void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}

// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}

void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}

void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}

void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}

void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}

void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}

void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}

// Shifts.
void Assembler::sll(Register rd, Register rt, uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes).
  DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}

void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}

void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}

void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}

void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}

void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}

void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}

void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}

void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK_LE(sa, 3);
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}

// ------------Memory-instructions-------------

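// Worked example for AdjustBaseAndOffset (illustrative numbers): a load at
// base + 0x9000 cannot be encoded directly because 0x9000 exceeds the signed
// 16-bit offset field. The "simple adjustment" path below rewrites it as
// scratch = base + 0x7FF8 (the largest int16_t that is a multiple of 8),
// leaving a residual offset of 0x9000 - 0x7FF8 = 0x1008, which fits.
// Stepping by multiples of 8 keeps the offset's alignment modulo 8 unchanged.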
void Assembler::AdjustBaseAndOffset(MemOperand* src,
                                    OffsetAccessType access_type,
                                    int second_access_add_to_offset) {
  // This method is used to adjust the base register and offset pair
  // for a load/store when the offset doesn't fit into int16_t.
  // It is assumed that 'base + offset' is sufficiently aligned for memory
  // operands that are machine word in size or smaller. For doubleword-sized
  // operands it's assumed that 'base' is a multiple of 8, while 'offset'
  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
  // and spilled variables on the stack accessed relative to the stack
  // pointer register).
  // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.

  bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
  bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
  DCHECK_LE(second_access_add_to_offset, 7);  // Must be <= 7.

  // is_int16 must be passed a signed value, hence the static cast below.
  if (is_int16(src->offset()) &&
      (!two_accesses || is_int16(static_cast<int32_t>(
                            src->offset() + second_access_add_to_offset)))) {
    // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
    // value) fits into int16_t.
    return;
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  DCHECK(src->rm() != scratch);  // Must not overwrite the register 'base'
                                 // while loading 'offset'.

#ifdef DEBUG
  // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
  uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif

  // Do not load the whole 32-bit 'offset' if it can be represented as
  // a sum of two 16-bit signed offsets. This can save an instruction or two.
  // To simplify matters, only do this for a symmetric range of offsets from
  // about -64KB to about +64KB, allowing further addition of 4 when accessing
  // 64-bit variables with two 32-bit accesses.
  constexpr int32_t kMinOffsetForSimpleAdjustment =
      0x7FF8;  // Max int16_t that's a multiple of 8.
  constexpr int32_t kMaxOffsetForSimpleAdjustment =
      2 * kMinOffsetForSimpleAdjustment;
  if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
    addiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
    src->offset_ -= kMinOffsetForSimpleAdjustment;
  } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
             src->offset() < 0) {
    addiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
    src->offset_ += kMinOffsetForSimpleAdjustment;
  } else if (IsMipsArchVariant(kMips32r6)) {
    // On r6 take advantage of the aui instruction, e.g.:
    //   aui   at, base, offset_high
    //   lw    reg_lo, offset_low(at)
    //   lw    reg_hi, (offset_low+4)(at)
    // or when offset_low+4 overflows int16_t:
    //   aui   at, base, offset_high
    //   addiu at, at, 8
    //   lw    reg_lo, (offset_low-8)(at)
    //   lw    reg_hi, (offset_low-4)(at)
    int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
    int16_t offset_low = static_cast<uint16_t>(src->offset());
    offset_high += (offset_low < 0)
                       ? 1
                       : 0;  // Account for offset sign extension in load/store.
    aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
    if (two_accesses && !is_int16(static_cast<int32_t>(
                            offset_low + second_access_add_to_offset))) {
      // Avoid overflow in the 16-bit offset of the load/store instruction when
      // adding 4.
      addiu(scratch, scratch, kDoubleSize);
      offset_low -= kDoubleSize;
    }
    src->offset_ = offset_low;
  } else {
    // Do not load the whole 32-bit 'offset' if it can be represented as
    // a sum of three 16-bit signed offsets. This can save an instruction.
    // To simplify matters, only do this for a symmetric range of offsets from
    // about -96KB to about +96KB, allowing further addition of 4 when accessing
    // 64-bit variables with two 32-bit accesses.
    constexpr int32_t kMinOffsetForMediumAdjustment =
        2 * kMinOffsetForSimpleAdjustment;
    constexpr int32_t kMaxOffsetForMediumAdjustment =
        3 * kMinOffsetForSimpleAdjustment;
    if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
      addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
      addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
      src->offset_ -= kMinOffsetForMediumAdjustment;
    } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
               src->offset() < 0) {
      addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
      addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
      src->offset_ += kMinOffsetForMediumAdjustment;
    } else {
      // Now that all shorter options have been exhausted, load the full 32-bit
      // offset.
      int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
      lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
      ori(scratch, scratch, loaded_offset & kImm16Mask);  // Load 32-bit offset.
      addu(scratch, scratch, src->rm());
      src->offset_ -= loaded_offset;
    }
  }
  src->rm_ = scratch;

  DCHECK(is_int16(src->offset()));
  if (two_accesses) {
    DCHECK(is_int16(
        static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
  }
  DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}

void Assembler::lb(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LB, source.rm(), rd, source.offset());
}

void Assembler::lbu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}

void Assembler::lh(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LH, source.rm(), rd, source.offset());
}

void Assembler::lhu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}

void Assembler::lw(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(LW, source.rm(), rd, source.offset());
}

void Assembler::lwl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}

void Assembler::lwr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}

void Assembler::sb(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(SB, source.rm(), rd, source.offset());
}

void Assembler::sh(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(SH, source.rm(), rd, source.offset());
}

void Assembler::sw(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(&source);
  GenInstrImmediate(SW, source.rm(), rd, source.offset());
}

void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}

void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
         IsMipsArchVariant(kMips32r2));
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}

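// ll/sc are the load-linked/store-conditional pair used to build atomic
// read-modify-write sequences: sc stores the value in 'rd' only if the
// reservation established by the matching ll is still intact, then
// overwrites 'rd' with 1 on success and 0 on failure. Note the encoding
// change: r6 moved LL/SC under SPECIAL3 with a 9-bit signed offset, while
// earlier revisions use dedicated opcodes with a 16-bit offset.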
void Assembler::ll(Register rd, const MemOperand& rs) {
  if (IsMipsArchVariant(kMips32r6)) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
  } else {
    DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kMips32r2));
    DCHECK(is_int16(rs.offset_));
    GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
  }
}

void Assembler::sc(Register rd, const MemOperand& rs) {
  if (IsMipsArchVariant(kMips32r6)) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
  } else {
    DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kMips32r2));
    GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
  }
}

void Assembler::llx(Register rd, const MemOperand& rs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_int9(rs.offset_));
  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, LL_R6);
}

void Assembler::scx(Register rd, const MemOperand& rs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_int9(rs.offset_));
  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, SC_R6);
}

void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j) || is_int16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}

void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses the same opcode as 'lui'; the encodings differ only
  // in that 'lui' has zero_reg in the rs field.
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs != zero_reg);
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}

// ---------PC-Relative instructions-----------

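// The r6 PC-relative instructions all share the PCREL opcode. Each emitter
// packs a sub-opcode (ADDIUPC, LWPC, AUIPC, ALUIPC) together with its 19- or
// 16-bit immediate into a single 21-bit field before handing the result to
// GenInstrImmediate.
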
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// -------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK_EQ(code & ~0xFFFFF, 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK(
      (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
      (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}

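// stop() is mainly a simulator facility: in simulator builds the stop code
// is encoded into the break instruction so the simulator can associate a
// message with it. When V8 itself runs on MIPS hardware (V8_HOST_ARCH_MIPS)
// the stop code would carry no meaning, so a plain break with a fixed marker
// code is emitted instead.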
void Assembler::stop(uint32_t code) {
  DCHECK_GT(code, kMaxWatchpointCode);
  DCHECK_LE(code, kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  break_(code, true);
#endif
}

void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift |
                code << 6;
  emit(instr);
}

void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift |
                code << 6;
  emit(instr);
}

void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}

// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}

void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}

// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}

void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}

void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}

void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}

// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}

void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}

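// movt/movf conditionally move a GPR based on an FPU condition flag. The
// MOVCI encoding has no dedicated cc field; instead the rt register slot is
// repurposed: bits 4..2 hold the condition code and bit 0 the true/false
// sense, hence the from_code((cc & 7) << 2 | tf) construction below.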
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}

// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}

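// ins_/ext_ encode their bit ranges in the rd and sa fields: INS stores
// msb = pos + size - 1 and lsb = pos, while EXT stores msbd = size - 1 and
// lsb = pos. Example (illustrative): ext_(a0, a1, 3, 5) extracts bits 7..3
// of a1 into a0, encoding 4 in the rd field and 3 in the sa field.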
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}

void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}

void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}

void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(!IsMipsArchVariant(kLoongson));
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr =
      PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_);
  emit(instr);
}

void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}

// Byte swap.
void Assembler::wsbh(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
}

void Assembler::seh(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
}

void Assembler::seb(Register rd, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
}

// --------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(&tmp);
  GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}

void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(&tmp);
  GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}

void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}

void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}

void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}

void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}

void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}

void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}

void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}

void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}

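// r6 replaces the cc-flag conditional moves (movz/movn/movt/movf) with
// select instructions: sel.fmt chooses between fs and ft based on the low
// bit of fd's initial value, while seleqz/selnez yield either their fs
// operand or zero depending on whether the condition operand is zero.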
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}

void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}

void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}

void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}

void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}

void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}

void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}

void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}

void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}

void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}

void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}

void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}

void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}

void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}

void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}

void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}

// Arithmetic.

void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
}

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}

void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
}

void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}

void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
}

void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}

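// madd/msub (COP1X, r2 only) take four FPU operands and compute
// (fs * ft) + fr (or - fr) into fd. r6 dropped COP1X in favour of the fused
// three-operand maddf/msubf further down, which accumulate into the
// destination itself: fd = fd +/- (fs * ft).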
void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
}

void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}

void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
}

void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r2));
  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
}

void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
}

void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
}

void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
}

void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
}

void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
}

void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}

void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
}

void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}

void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}

void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}

void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}

void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}

void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
}

void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}

void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}

void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}

void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}

void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}

// Conversions.

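// Note on rounding: cvt.w/cvt.l round according to the current FCSR
// rounding mode, while the trunc/round/floor/ceil variants encode a
// fixed rounding mode (toward zero / to nearest / down / up) regardless
// of the FCSR setting.
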
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}

void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}

void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}

void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}

void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}

void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}

void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}

void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}

void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}

void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}

void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }

void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}

void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }

void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}

void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}

void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}

void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}

void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}

void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}

void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}

void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}

void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}

void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}

void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}

void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}

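// r6 min/max family: min/max select the numerically smaller/larger
// operand, while mina/maxa select the operand with the smaller/larger
// absolute value. A rough usage sketch:
//   min_d(f0, f2, f4);   // f0 <- min(f2, f4)
//   maxa_s(f0, f2, f4);  // f0 <- whichever of f2, f4 has larger |value|
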
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}

void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}

void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}

void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}

void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}

void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}

void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}

void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}

void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}

void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}

void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}

void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}

void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}

void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}

void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}

void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}

void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}

void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}

// Conditions for >= MIPSr6.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
                    FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
                fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}

void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}

void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

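// Typical r6 compare-and-branch sequence (sketch): cmp.cond.fmt writes an
// all-ones/all-zeros mask into an FPU register, and bc1eqz/bc1nez branch
// on bit 0 of that register:
//   cmp_d(EQ, f0, f2, f4);  // f0 <- mask, all ones iff f2 == f4
//   bc1nez(offset, f0);     // taken when the comparison held
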
// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
                  FPURegister ft, uint16_t cc) {
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | cc << 8 |
                3 << 4 | cond;
  emit(instr);
}

void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}

void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}

void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
  DCHECK_EQ(src2, 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}

void Assembler::bc1f(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bc1t(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

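// Pre-r6 sequence (sketch): c.cond.fmt sets condition-code bit cc in the
// FCSR, and bc1t/bc1f branch on that bit:
//   c_d(OLT, f2, f4, 0);  // cc0 <- (f2 < f4), ordered
//   bc1t(offset, 0);      // taken when cc0 is set
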
// ---------- MSA instructions ------------
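// The MSA emitters below are generated from X-macro lists: each
// V(name, opcode) entry expands into one emitter function, and the
// _b/_h/_w/_d suffixes select the lane width (byte, halfword, word,
// doubleword).
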
#define MSA_BRANCH_LIST(V) \
  V(bz_v, BZ_V)            \
  V(bz_b, BZ_B)            \
  V(bz_h, BZ_H)            \
  V(bz_w, BZ_W)            \
  V(bz_d, BZ_D)            \
  V(bnz_v, BNZ_V)          \
  V(bnz_b, BNZ_B)          \
  V(bnz_h, BNZ_H)          \
  V(bnz_w, BNZ_W)          \
  V(bnz_d, BNZ_D)

#define MSA_BRANCH(name, opcode)                         \
  void Assembler::name(MSARegister wt, int16_t offset) { \
    GenInstrMsaBranch(opcode, wt, offset);               \
  }

MSA_BRANCH_LIST(MSA_BRANCH)
#undef MSA_BRANCH
#undef MSA_BRANCH_LIST

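// MSA loads and stores encode a signed 10-bit offset (MI10 format); for
// larger offsets the effective address is first materialized into a
// scratch register, as in the else-branch of the macro below.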
#define MSA_LD_ST_LIST(V) \
  V(ld_b, LD_B)           \
  V(ld_h, LD_H)           \
  V(ld_w, LD_W)           \
  V(ld_d, LD_D)           \
  V(st_b, ST_B)           \
  V(st_h, ST_H)           \
  V(st_w, ST_W)           \
  V(st_d, ST_D)

#define MSA_LD_ST(name, opcode)                                  \
  void Assembler::name(MSARegister wd, const MemOperand& rs) {   \
    MemOperand source = rs;                                      \
    AdjustBaseAndOffset(&source);                                \
    if (is_int10(source.offset())) {                             \
      GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
    } else {                                                     \
      UseScratchRegisterScope temps(this);                       \
      Register scratch = temps.Acquire();                        \
      DCHECK(rs.rm() != scratch);                                \
      addiu(scratch, source.rm(), source.offset());              \
      GenInstrMsaMI10(opcode, 0, scratch, wd);                   \
    }                                                            \
  }

MSA_LD_ST_LIST(MSA_LD_ST)
#undef MSA_LD_ST
#undef MSA_LD_ST_LIST

#define MSA_I10_LIST(V) \
  V(ldi_b, I5_DF_b)     \
  V(ldi_h, I5_DF_h)     \
  V(ldi_w, I5_DF_w)     \
  V(ldi_d, I5_DF_d)

#define MSA_I10(name, format)                           \
  void Assembler::name(MSARegister wd, int32_t imm10) { \
    GenInstrMsaI10(LDI, format, imm10, wd);             \
  }
MSA_I10_LIST(MSA_I10)
#undef MSA_I10
#undef MSA_I10_LIST

#define MSA_I5_LIST(V) \
  V(addvi, ADDVI)      \
  V(subvi, SUBVI)      \
  V(maxi_s, MAXI_S)    \
  V(maxi_u, MAXI_U)    \
  V(mini_s, MINI_S)    \
  V(mini_u, MINI_U)    \
  V(ceqi, CEQI)        \
  V(clti_s, CLTI_S)    \
  V(clti_u, CLTI_U)    \
  V(clei_s, CLEI_S)    \
  V(clei_u, CLEI_U)

#define MSA_I5_FORMAT(name, opcode, format)                       \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t imm5) {                \
    GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd);          \
  }

#define MSA_I5(name, opcode)     \
  MSA_I5_FORMAT(name, opcode, b) \
  MSA_I5_FORMAT(name, opcode, h) \
  MSA_I5_FORMAT(name, opcode, w) \
  MSA_I5_FORMAT(name, opcode, d)

MSA_I5_LIST(MSA_I5)
#undef MSA_I5
#undef MSA_I5_FORMAT
#undef MSA_I5_LIST

#define MSA_I8_LIST(V) \
  V(andi_b, ANDI_B)    \
  V(ori_b, ORI_B)      \
  V(nori_b, NORI_B)    \
  V(xori_b, XORI_B)    \
  V(bmnzi_b, BMNZI_B)  \
  V(bmzi_b, BMZI_B)    \
  V(bseli_b, BSELI_B)  \
  V(shf_b, SHF_B)      \
  V(shf_h, SHF_H)      \
  V(shf_w, SHF_W)

#define MSA_I8(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
    GenInstrMsaI8(opcode, imm8, ws, wd);                                \
  }

MSA_I8_LIST(MSA_I8)
#undef MSA_I8
#undef MSA_I8_LIST

#define MSA_VEC_LIST(V) \
  V(and_v, AND_V)       \
  V(or_v, OR_V)         \
  V(nor_v, NOR_V)       \
  V(xor_v, XOR_V)       \
  V(bmnz_v, BMNZ_V)     \
  V(bmz_v, BMZ_V)       \
  V(bsel_v, BSEL_V)

#define MSA_VEC(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
    GenInstrMsaVec(opcode, wt, ws, wd);                                  \
  }

MSA_VEC_LIST(MSA_VEC)
#undef MSA_VEC
#undef MSA_VEC_LIST

#define MSA_2R_LIST(V) \
  V(pcnt, PCNT)        \
  V(nloc, NLOC)        \
  V(nlzc, NLZC)

#define MSA_2R_FORMAT(name, opcode, format)                         \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd);              \
  }

#define MSA_2R(name, opcode)     \
  MSA_2R_FORMAT(name, opcode, b) \
  MSA_2R_FORMAT(name, opcode, h) \
  MSA_2R_FORMAT(name, opcode, w) \
  MSA_2R_FORMAT(name, opcode, d)

MSA_2R_LIST(MSA_2R)
#undef MSA_2R
#undef MSA_2R_FORMAT
#undef MSA_2R_LIST

#define MSA_FILL(format)                                              \
  void Assembler::fill_##format(MSARegister wd, Register rs) {        \
    DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));     \
    DCHECK(rs.is_valid() && wd.is_valid());                           \
    Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format |   \
                  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
                  MSA_VEC_2R_2RF_MINOR;                               \
    emit(instr);                                                      \
  }

MSA_FILL(b)
MSA_FILL(h)
MSA_FILL(w)
#undef MSA_FILL

#define MSA_2RF_LIST(V) \
  V(fclass, FCLASS)     \
  V(ftrunc_s, FTRUNC_S) \
  V(ftrunc_u, FTRUNC_U) \
  V(fsqrt, FSQRT)       \
  V(frsqrt, FRSQRT)     \
  V(frcp, FRCP)         \
  V(frint, FRINT)       \
  V(flog2, FLOG2)       \
  V(fexupl, FEXUPL)     \
  V(fexupr, FEXUPR)     \
  V(ffql, FFQL)         \
  V(ffqr, FFQR)         \
  V(ftint_s, FTINT_S)   \
  V(ftint_u, FTINT_U)   \
  V(ffint_s, FFINT_S)   \
  V(ffint_u, FFINT_U)

#define MSA_2RF_FORMAT(name, opcode, format)                        \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd);            \
  }

#define MSA_2RF(name, opcode)     \
  MSA_2RF_FORMAT(name, opcode, w) \
  MSA_2RF_FORMAT(name, opcode, d)

MSA_2RF_LIST(MSA_2RF)
#undef MSA_2RF
#undef MSA_2RF_FORMAT
#undef MSA_2RF_LIST

#define MSA_3R_LIST(V)  \
  V(sll, SLL_MSA)       \
  V(sra, SRA_MSA)       \
  V(srl, SRL_MSA)       \
  V(bclr, BCLR)         \
  V(bset, BSET)         \
  V(bneg, BNEG)         \
  V(binsl, BINSL)       \
  V(binsr, BINSR)       \
  V(addv, ADDV)         \
  V(subv, SUBV)         \
  V(max_s, MAX_S)       \
  V(max_u, MAX_U)       \
  V(min_s, MIN_S)       \
  V(min_u, MIN_U)       \
  V(max_a, MAX_A)       \
  V(min_a, MIN_A)       \
  V(ceq, CEQ)           \
  V(clt_s, CLT_S)       \
  V(clt_u, CLT_U)       \
  V(cle_s, CLE_S)       \
  V(cle_u, CLE_U)       \
  V(add_a, ADD_A)       \
  V(adds_a, ADDS_A)     \
  V(adds_s, ADDS_S)     \
  V(adds_u, ADDS_U)     \
  V(ave_s, AVE_S)       \
  V(ave_u, AVE_U)       \
  V(aver_s, AVER_S)     \
  V(aver_u, AVER_U)     \
  V(subs_s, SUBS_S)     \
  V(subs_u, SUBS_U)     \
  V(subsus_u, SUBSUS_U) \
  V(subsuu_s, SUBSUU_S) \
  V(asub_s, ASUB_S)     \
  V(asub_u, ASUB_U)     \
  V(mulv, MULV)         \
  V(maddv, MADDV)       \
  V(msubv, MSUBV)       \
  V(div_s, DIV_S_MSA)   \
  V(div_u, DIV_U)       \
  V(mod_s, MOD_S)       \
  V(mod_u, MOD_U)       \
  V(dotp_s, DOTP_S)     \
  V(dotp_u, DOTP_U)     \
  V(dpadd_s, DPADD_S)   \
  V(dpadd_u, DPADD_U)   \
  V(dpsub_s, DPSUB_S)   \
  V(dpsub_u, DPSUB_U)   \
  V(pckev, PCKEV)       \
  V(pckod, PCKOD)       \
  V(ilvl, ILVL)         \
  V(ilvr, ILVR)         \
  V(ilvev, ILVEV)       \
  V(ilvod, ILVOD)       \
  V(vshf, VSHF)         \
  V(srar, SRAR)         \
  V(srlr, SRLR)         \
  V(hadd_s, HADD_S)     \
  V(hadd_u, HADD_U)     \
  V(hsub_s, HSUB_S)     \
  V(hsub_u, HSUB_U)

#define MSA_3R_FORMAT(name, opcode, format)                             \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,       \
                                  MSARegister wt) {                     \
    GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
  }

#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format)                \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,    \
                                  Register rt) {                     \
    GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
  }

#define MSA_3R(name, opcode)     \
  MSA_3R_FORMAT(name, opcode, b) \
  MSA_3R_FORMAT(name, opcode, h) \
  MSA_3R_FORMAT(name, opcode, w) \
  MSA_3R_FORMAT(name, opcode, d)

#define MSA_3R_SLD_SPLAT(name, opcode)     \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)

MSA_3R_LIST(MSA_3R)
MSA_3R_SLD_SPLAT(sld, SLD)
MSA_3R_SLD_SPLAT(splat, SPLAT)

#undef MSA_3R
#undef MSA_3R_FORMAT
#undef MSA_3R_FORMAT_SLD_SPLAT
#undef MSA_3R_SLD_SPLAT
#undef MSA_3R_LIST

#define MSA_3RF_LIST1(V) \
  V(fcaf, FCAF)          \
  V(fcun, FCUN)          \
  V(fceq, FCEQ)          \
  V(fcueq, FCUEQ)        \
  V(fclt, FCLT)          \
  V(fcult, FCULT)        \
  V(fcle, FCLE)          \
  V(fcule, FCULE)        \
  V(fsaf, FSAF)          \
  V(fsun, FSUN)          \
  V(fseq, FSEQ)          \
  V(fsueq, FSUEQ)        \
  V(fslt, FSLT)          \
  V(fsult, FSULT)        \
  V(fsle, FSLE)          \
  V(fsule, FSULE)        \
  V(fadd, FADD)          \
  V(fsub, FSUB)          \
  V(fmul, FMUL)          \
  V(fdiv, FDIV)          \
  V(fmadd, FMADD)        \
  V(fmsub, FMSUB)        \
  V(fexp2, FEXP2)        \
  V(fmin, FMIN)          \
  V(fmin_a, FMIN_A)      \
  V(fmax, FMAX)          \
  V(fmax_a, FMAX_A)      \
  V(fcor, FCOR)          \
  V(fcune, FCUNE)        \
  V(fcne, FCNE)          \
  V(fsor, FSOR)          \
  V(fsune, FSUNE)        \
  V(fsne, FSNE)

#define MSA_3RF_LIST2(V) \
  V(fexdo, FEXDO)        \
  V(ftq, FTQ)            \
  V(mul_q, MUL_Q)        \
  V(madd_q, MADD_Q)      \
  V(msub_q, MSUB_Q)      \
  V(mulr_q, MULR_Q)      \
  V(maddr_q, MADDR_Q)    \
  V(msubr_q, MSUBR_Q)

#define MSA_3RF_FORMAT(name, opcode, df, df_c)                \
  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
                              MSARegister wt) {               \
    GenInstrMsa3RF(opcode, df_c, wt, ws, wd);                 \
  }

#define MSA_3RF_1(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, w, 0) \
  MSA_3RF_FORMAT(name, opcode, d, 1)

#define MSA_3RF_2(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, h, 0) \
  MSA_3RF_FORMAT(name, opcode, w, 1)

MSA_3RF_LIST1(MSA_3RF_1)
MSA_3RF_LIST2(MSA_3RF_2)
#undef MSA_3RF_1
#undef MSA_3RF_2
#undef MSA_3RF_FORMAT
#undef MSA_3RF_LIST1
#undef MSA_3RF_LIST2

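// Element operations: sldi slides vector elements by an immediate,
// splati replicates a selected lane, copy_s/copy_u move a lane into a
// GPR with sign-/zero-extension, and insert_/insve_ write a GPR value or
// another vector's lane into a destination lane.
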
void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
}

void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
}

void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
}

void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
}

void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
}

void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
}

void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
}

void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
}

void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
}

void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
}

void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
}

void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
}

void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
}

void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
}

void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
}

void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
}

void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
}

void Assembler::move_v(MSARegister wd, MSARegister ws) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(cd.is_valid() && rs.is_valid());
  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
                (cd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rd.is_valid() && cs.is_valid());
  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
                (rd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

#define MSA_BIT_LIST(V) \
  V(slli, SLLI)         \
  V(srai, SRAI)         \
  V(srli, SRLI)         \
  V(bclri, BCLRI)       \
  V(bseti, BSETI)       \
  V(bnegi, BNEGI)       \
  V(binsli, BINSLI)     \
  V(binsri, BINSRI)     \
  V(sat_s, SAT_S)       \
  V(sat_u, SAT_U)       \
  V(srari, SRARI)       \
  V(srlri, SRLRI)

#define MSA_BIT_FORMAT(name, opcode, format)                      \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t m) {                   \
    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);           \
  }

#define MSA_BIT(name, opcode)     \
  MSA_BIT_FORMAT(name, opcode, b) \
  MSA_BIT_FORMAT(name, opcode, h) \
  MSA_BIT_FORMAT(name, opcode, w) \
  MSA_BIT_FORMAT(name, opcode, d)

MSA_BIT_LIST(MSA_BIT)
#undef MSA_BIT
#undef MSA_BIT_FORMAT
#undef MSA_BIT_LIST

int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                         intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  if (RelocInfo::IsInternalReference(rmode)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc);
    if (*p == 0) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
    if (IsLui(instr)) {
      Instr instr1 = instr_at(pc + 0 * kInstrSize);
      Instr instr2 = instr_at(pc + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = GetLuiOriImmediate(instr1, instr2);
      }

      if (imm == kEndOfJumpChain) {
        return 0;  // Number of instructions patched.
      }
      imm += pc_delta;
      DCHECK_EQ(imm & 3, 0);
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u,
                                               &jic_offset_u);
        instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
      } else {
        PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
                             1 * kInstrSize);
      }
      return 2;  // Number of instructions patched.
    } else {
      UNREACHABLE();
    }
  }
}

void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
                                          intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
  if (IsLui(instr)) {
    Instr instr1 = instr_at(pc + 0 * kInstrSize);
    Instr instr2 = instr_at(pc + 1 * kInstrSize);
    Instr instr3 = instr_at(pc + 2 * kInstrSize);
    int32_t imm;
    Address ori_offset;
    if (IsNal(instr2)) {
      instr2 = instr3;
      ori_offset = 2 * kInstrSize;
    } else {
      ori_offset = 1 * kInstrSize;
    }
    DCHECK(IsOri(instr2));
    imm = GetLuiOriImmediate(instr1, instr2);
    instr1 &= ~kImm16Mask;
    instr2 &= ~kImm16Mask;

    if (imm == kEndOfJumpChain) {
      return;
    }
    imm -= pc_delta;
    DCHECK_EQ(imm & 3, 0);
    PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset);
    return;
  } else {
    UNREACHABLE();
  }
}

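// Growth policy: double the buffer size, but grow by at most 1 MB per
// step; internal references in the already-emitted code are then
// relocated to the new buffer start.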
void Assembler::GrowBuffer() {
  // Compute new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  // Some internal data structures overflow for very large buffers; the value
  // of kMaximalBufferSize must be kept small enough to avoid this.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data.
  int pc_delta = new_start - buffer_start_;
  int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  last_call_pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  Vector<byte> instructions{buffer_start_, pc_offset()};
  Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
    }
  }
  DCHECK(!overflow());
}

void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint64_t*>(pc_) = data;
  pc_ += sizeof(uint64_t);
}

void Assembler::dd(Label* label) {
  uint32_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint32_t>(buffer_start_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
  reloc_info_writer.Write(&rinfo);
}

void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}

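// Rationale (sketch): a MIPS conditional branch carries a 16-bit signed
// offset counted in instructions, i.e. roughly +/-128 KB of reach. Once
// the emitted code may outgrow that range, branches to still-unbound
// labels are routed through the trampoline slots emitted here.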
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit the jump (2 instructions), then we emit the trampoline
    // pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      if (IsMipsArchVariant(kMips32r6)) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        {
          if (IsMipsArchVariant(kMips32r6)) {
            bc(&after_pool);
            nop();
          } else {
            GenPCRelativeJump(t8, t9, 0, RelocInfo::NONE,
                              BranchDelaySlot::PROTECT);
          }
        }
      }
      // If unbound_labels_count_ is big enough, label after_pool will need a
      // trampoline too, so we must create the trampoline before the bind
      // operation to make sure 'bind' can see this information.
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
      bind(&after_pool);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels is zero at this point, so we
    // can move the next buffer check out to the maximum distance.
    next_buffer_check_ =
        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}

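// Recognized sequences (sketch): lui/ori as emitted by li (optionally
// followed by addu t9, ra, t9 for a pc-relative jump), the r6 compact
// pairs lui/jic and lui/jialc, and lui/nal/ori for pc-relative builtin
// calls.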
Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  // Interpret 2 instructions generated by li (lui/ori) or optimized pairs
  // lui/jic, aui/jic or lui/jialc.
  if (IsLui(instr1)) {
    if (IsOri(instr2)) {
      Address target_address;
      // Assemble the 32 bit value.
      target_address = GetLuiOriImmediate(instr1, instr2);
      if (IsAddu(instr3, t9, ra, t9)) {
        target_address += pc + kRelativeJumpForBuiltinsOffset;
      }
      return target_address;
    } else if (IsJicOrJialc(instr2)) {
      // Assemble the 32 bit value.
      return static_cast<Address>(CreateTargetAddress(instr1, instr2));
    } else if (IsNal(instr2)) {
      DCHECK(IsOri(instr3));
      Address target_address;
      target_address = GetLuiOriImmediate(instr1, instr3);
      return target_address + pc + kRelativeCallForBuiltinsOffset;
    }
  }

  // We should never get here; force a bad address if we do.
  UNREACHABLE();
}

// On MIPS, a target address is stored in a lui/ori instruction pair, each
// of which loads 16 bits of the 32-bit address into a register.
// Patching the address must replace both instructions and flush the i-cache.
// On r6, the target address is stored in a lui/jic pair, and both
// instructions have to be patched.
void Assembler::set_target_value_at(Address pc, uint32_t target,
                                    ICacheFlushMode icache_flush_mode) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  DCHECK(IsLui(instr1) &&
         (IsOri(instr2) || IsJicOrJialc(instr2) || IsNal(instr2)));
#endif

  if (IsJicOrJialc(instr2)) {
    // Must use 2 instructions to ensure patchable code => use lui and jic.
    uint32_t lui_offset, jic_offset;
    Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);

    instr1 &= ~kImm16Mask;
    instr2 &= ~kImm16Mask;

    instr1 |= lui_offset;
    instr2 |= jic_offset;

    instr_at_put(pc, instr1);
    instr_at_put(pc + kInstrSize, instr2);
  } else {
    Instr instr3 = instr_at(pc + 2 * kInstrSize);
    // If we are using relative calls/jumps for builtins.
    if (IsNal(instr2)) {
      target -= pc + kRelativeCallForBuiltinsOffset;
    }
    if (IsAddu(instr3, t9, ra, t9)) {
      target -= pc + kRelativeJumpForBuiltinsOffset;
    }
    // Must use 2 instructions to ensure patchable code => just use lui and
    // ori:
    //   lui rt, upper-16.
    //   ori rt, rt, lower-16.
    if (IsNal(instr2)) {
      instr1 &= ~kImm16Mask;
      instr3 &= ~kImm16Mask;
      PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr3,
                           2 * kInstrSize);
    } else {
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;
      PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr2,
                           1 * kInstrSize);
    }
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    FlushInstructionCache(pc, 2 * sizeof(int32_t));
  }
}

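// Emitted sequence (sketch; mirrors the body below):
//   or   tf, ra, zero      // save ra in tf
//   nal                    // ra <- address of nal + 8
//   lui  ts, hi(imm32)
//   ori  ts, ts, lo(imm32)
//   addu ts, ra, ts        // absolute target from pc-relative imm32
//   jr   ts                // ra restored from tf around the jump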
void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
                                  RelocInfo::Mode rmode,
                                  BranchDelaySlot bdslot) {
  // Order of these instructions is relied upon when patching them
  // or when changing the imm32 that the lui/ori pair loads.
  or_(tf, ra, zero_reg);
  nal();  // Relative place of nal instruction determines kLongBranchPCOffset.
  if (!RelocInfo::IsNone(rmode)) {
    RecordRelocInfo(rmode);
  }
  lui(ts, (imm32 & kHiMask) >> kLuiShift);
  ori(ts, ts, (imm32 & kImm16Mask));
  addu(ts, ra, ts);
  if (bdslot == USE_DELAY_SLOT) {
    or_(ra, tf, zero_reg);
  }
  jr(ts);
  if (bdslot == PROTECT) {
    or_(ra, tf, zero_reg);
  }
}

void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
                                         RelocInfo::Mode rmode,
                                         BranchDelaySlot bdslot) {
  if (!RelocInfo::IsNone(rmode)) {
    RecordRelocInfo(rmode);
  }
  // Order of these instructions is relied upon when patching them
  // or when changing the imm32 that the lui/ori pair loads.
  lui(t, (imm32 & kHiMask) >> kLuiShift);
  nal();  // Relative place of nal instruction determines kLongBranchPCOffset.
  ori(t, t, (imm32 & kImm16Mask));
  addu(t, ra, t);
  jalr(t);
  if (bdslot == PROTECT) nop();
  set_last_call_pc_(pc_);
}

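// Usage sketch (as in the MSA load/store macro above):
//   UseScratchRegisterScope temps(this);
//   Register scratch = temps.Acquire();
// The destructor restores the previous set of available scratch
// registers.
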
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}

UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;
}

Register UseScratchRegisterScope::Acquire() {
  DCHECK_NOT_NULL(available_);
  DCHECK_NE(*available_, 0);
  int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
  *available_ &= ~(1UL << index);

  return Register::from_code(index);
}

bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS