• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
// The original source code covered by the above license has been
// modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 #include "src/mips64/assembler-mips64.h"
36 
37 #if V8_TARGET_ARCH_MIPS64
38 
39 #include "src/base/cpu.h"
40 #include "src/code-stubs.h"
41 #include "src/deoptimizer.h"
42 #include "src/mips64/assembler-mips64-inl.h"
43 
44 namespace v8 {
45 namespace internal {
46 
47 
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  // FPU support explicitly requested by the build configuration.
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}
67 
68 
// Populate the supported_ feature bitmask, from compile-time knowledge and
// (for native builds) from runtime CPU probing.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS64R6) && defined(_MIPS_MSA)
  // MSA availability is fixed at compile time for the simulator build.
  supported_ |= 1u << MIPS_SIMD;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS64R6)
#if defined(_MIPS_MSA)
  // MSA guaranteed by the build target.
  supported_ |= 1u << MIPS_SIMD;
#else
  // MSA not guaranteed by the build target; ask the CPU.
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#endif
}
96 
97 
// Feature/target printing is not implemented for MIPS64.
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }
100 
101 
ToNumber(Register reg)102 int ToNumber(Register reg) {
103   DCHECK(reg.is_valid());
104   const int kNumbers[] = {
105     0,    // zero_reg
106     1,    // at
107     2,    // v0
108     3,    // v1
109     4,    // a0
110     5,    // a1
111     6,    // a2
112     7,    // a3
113     8,    // a4
114     9,    // a5
115     10,   // a6
116     11,   // a7
117     12,   // t0
118     13,   // t1
119     14,   // t2
120     15,   // t3
121     16,   // s0
122     17,   // s1
123     18,   // s2
124     19,   // s3
125     20,   // s4
126     21,   // s5
127     22,   // s6
128     23,   // s7
129     24,   // t8
130     25,   // t9
131     26,   // k0
132     27,   // k1
133     28,   // gp
134     29,   // sp
135     30,   // fp
136     31,   // ra
137   };
138   return kNumbers[reg.code()];
139 }
140 
141 
// Return the Register whose hardware encoding is |num| (0..31).
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  // Table order must match the MIPS register numbering: zero_reg=0 ... ra=31.
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
160 
161 
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

// Relocation modes that require fix-up when code moves: internal references
// are absolute addresses into the code object itself.
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
168 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}
175 
176 
// MIPS64 does not use a constant pool, so no relocation ever lives in one.
bool RelocInfo::IsInConstantPool() {
  return false;
}
180 
// Look up the deoptimization id for the runtime entry this reloc targets.
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
185 
// Patch the call target of a JS_TO_WASM_CALL site in place.
void RelocInfo::set_js_to_wasm_address(Address address,
                                       ICacheFlushMode icache_flush_mode) {
  DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
  Assembler::set_target_address_at(pc_, constant_pool_, address,
                                   icache_flush_mode);
}
192 
// Read back the call target of a JS_TO_WASM_CALL site.
Address RelocInfo::js_to_wasm_address() const {
  DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
  return Assembler::target_address_at(pc_, constant_pool_);
}
197 
// Return the low 32 bits of the encoded target, used as a wasm call tag.
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
203 
204 // -----------------------------------------------------------------------------
205 // Implementation of Operand and MemOperand.
206 // See assembler-mips-inl.h for inlined constructors.
207 
// Immediate operand embedding a heap object; relocated as EMBEDDED_OBJECT.
Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}
212 
// Build an operand for a double. Values representable as a Smi are emitted
// directly; otherwise a heap-number allocation is deferred via
// HeapObjectRequest and patched in later.
Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}
221 
// Build an operand for a code stub whose Code object is generated later;
// the request is resolved in AllocateAndInstallRequestedHeapObjects().
Operand Operand::EmbeddedCode(CodeStub* stub) {
  Operand result(0, RelocInfo::CODE_TARGET);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(stub);
  return result;
}
228 
// Memory operand: base register plus signed byte offset.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
232 
233 
// Memory operand whose offset is computed as unit * multiplier plus an
// optional addend (used for scaled array-style accesses).
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
239 
// Materialize every deferred heap object (heap numbers and code stubs)
// recorded during assembly, then patch its handle location into the code
// buffer at the offset noted in each request.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        break;
      case HeapObjectRequest::kCodeStub:
        request.code_stub()->set_isolate(isolate);
        object = request.code_stub()->GetCode();
        break;
    }
    // Overwrite the placeholder value emitted at assembly time.
    Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
  }
}
257 
258 // -----------------------------------------------------------------------------
259 // Specific instructions, constants, and masks.
260 
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT
//  Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

// Lw(r, MemOperand(fp, 0)) -- fp-relative 32-bit load.
const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

// Sw(r, MemOperand(fp, 0)) -- fp-relative 32-bit store.
const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

// Same patterns with a negative fp offset.
const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode + rs field: identifies a lw/sw instruction regardless of rt/offset.
const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
293 
// Construct an assembler writing into |buffer| and initialize the
// trampoline-pool bookkeeping used for long-branch emission.
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size),
      scratch_register_list_(at.bit()) {
  // Relocation info grows downward from the end of the buffer.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // Forcing long branches means the trampoline pool is never needed.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}
314 
// Finalize assembly: resolve deferred heap objects and fill in the code
// descriptor describing the instruction and relocation areas of the buffer.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written downward from the buffer end.
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
332 
333 
// Pad with nops until the pc offset is a multiple of |m| (a power of two,
// at least one instruction wide).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
341 
342 
void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
348 
349 
// --- Instruction field accessors ---------------------------------------
// Get*Reg decode a register operand; Get* return the field value shifted
// down to bit 0; Get*Field return the raw masked (unshifted) bits.

Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}


Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}


Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return  (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return  instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


// Everything except the 16-bit immediate; zero for an emitted label constant.
uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}
428 
429 
// The matchers below compare against the canonical push/pop and fp-relative
// load/store patterns defined above, masking out the rt (target) register.

bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


// Negative-offset variants additionally check the offset sign bit.
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
460 
461 
462 // Labels refer to positions in the (to be) generated code.
463 // There are bound, linked, and unused labels.
464 //
465 // Bound labels refer to known positions in the already
466 // generated code. pos() is the position the label refers to.
467 //
468 // Linked labels refer to unknown positions in the code
469 // to be generated; pos() is the position of the last
470 // instruction using the label.
471 
472 // The link chain is terminated by a value in the instruction of -1,
473 // which is an otherwise illegal value (branch -1 is inf loop).
474 // The instruction 16-bit offset field addresses 32-bit words, but in
475 // code is conv to an 18-bit value addressing bytes, hence the -4 value.
476 
// Sentinel position marking the end of a label link chain (see comment
// block above for why -4 is used).
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
480 
IsMsaBranch(Instr instr)481 bool Assembler::IsMsaBranch(Instr instr) {
482   uint32_t opcode = GetOpcodeField(instr);
483   uint32_t rs_field = GetRsField(instr);
484   if (opcode == COP1) {
485     switch (rs_field) {
486       case BZ_V:
487       case BZ_B:
488       case BZ_H:
489       case BZ_W:
490       case BZ_D:
491       case BNZ_V:
492       case BNZ_B:
493       case BNZ_H:
494       case BNZ_W:
495       case BNZ_D:
496         return true;
497       default:
498         return false;
499     }
500   } else {
501     return false;
502   }
503 }
504 
// Returns true if |instr| is any conditional or compact branch (including
// coprocessor and MSA branches, and the r6 compact-branch encodings).
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
528 
529 
bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}
535 
// Returns true for nal (branch-and-link with no condition taken), encoded
// as REGIMM/BLTZAL with rs == zero_reg.
bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}
542 
IsBzc(Instr instr)543 bool Assembler::IsBzc(Instr instr) {
544   uint32_t opcode = GetOpcodeField(instr);
545   // Checks if the instruction is BEQZC or BNEZC.
546   return (opcode == POP66 && GetRsField(instr) != 0) ||
547          (opcode == POP76 && GetRsField(instr) != 0);
548 }
549 
550 
bool Assembler::IsEmittedConstant(Instr instr) {
  // A word whose upper bits are all zero is a label constant, not a real
  // instruction (see GetLabelConst).
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}
555 
556 
// Opcode-only matchers for the classic equality branches.
bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}
565 
566 
// Compact branch-on-zero matchers (r6); rs must be non-zero for these
// encodings.
bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}
577 
578 
// Compact compare branches (r6). The rs < rt register-order constraint
// distinguishes BEQC/BNEC from the other POP10/POP30 variants.
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}
593 
// Returns true if |instr| is a register move "or rd, rs, zero_reg" with the
// given destination and source registers.
bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}
607 
// Returns true for any jump: J, JAL, JR, or JALR.
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}
618 
619 
// Matchers for the individual jump forms.
bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}
640 
641 
bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}
654 
655 
// Returns true if |instr| is a (possibly typed) nop as emitted by
// Assembler::nop(type).
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
678 
679 
// Decode the sign-extended byte offset of a 16-bit branch (imm16 counts
// instruction words, hence the << 2).
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
684 
685 
bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


// Extract the raw 16-bit offset field of an lw instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}
695 
696 
// Return a copy of the lw instruction with its offset replaced; base and
// target registers are preserved.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}
706 
707 
bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


// Return a copy of the sw instruction with its 16-bit offset replaced.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
717 
718 
// True for addiu/daddiu (32/64-bit add-immediate).
bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


// Return a copy of the add-immediate instruction with its offset replaced.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
728 
729 
// True for andi (and-immediate).
bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
733 
734 
// Width of a branch instruction's offset field: 26 bits for BC/BALC,
// 21 bits for BEQZC/BNEZC (r6 only), otherwise the classic 16 bits.
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}
745 
746 
// Decode the branch's signed word offset and convert it to an absolute
// buffer position relative to |pos| (or return the end-of-chain sentinel).
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    // Branch offsets are relative to the delay-slot pc (pos + 4).
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
763 
764 
// Decode the link-chain target stored at buffer position |pos|.
// Returns the absolute position of the next link in the chain, or
// kEndOfChain when the chain terminates here. Handles every encoding a
// label link can take: 64-bit internal references, emitted label
// constants, short branches, long branches (mov t8,ra prefix or
// lui/nal pair), lui/ori/ori 48-bit address sequences, and j/jal.
int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    // Internal reference: a raw 64-bit address stored in the buffer.
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
       return kEndOfChain;
     } else {
       // Sign-extend the 16-bit word offset to an 18-bit byte offset.
       int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
       return (imm18 + pos);
     }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
         IsMov(instr, t8, ra));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    // Long-branch sequence starting with "mov t8, ra": the 32-bit offset is
    // split across the lui/ori pair two and three instructions later.
    int32_t imm32;
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));
    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      // lui/nal/ori long-branch variant: 32-bit pc-relative offset.
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      // lui/ori/.../ori sequence materializing a 48-bit absolute address.
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      // TODO(plind) create named constants for shift values.
      int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
      imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
      imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
      // Sign extend address;
      imm >>= 16;

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      } else {
        // Convert the absolute address back to a buffer position.
        uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
        DCHECK(instr_address - imm < INT_MAX);
        int delta = static_cast<int>(instr_address - imm);
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    // j/jal: 26-bit word index, shifted to a 28-bit byte offset.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}
861 
862 
// Re-encode |instr| so that its branch offset points from |pos| to
// |target_pos|. The offset field width (16/21/26 bits) depends on the
// branch kind; the offset must be word-aligned and fit in that field.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  // Offsets are relative to the delay-slot pc.
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK_EQ(imm & 3, 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
876 
877 
// Resolves the link at |pos| so it refers to |target_pos|. The patching
// strategy is selected by inspecting the instruction (sequence) at |pos|:
// internal reference, emitted label constant, short branch, lui-based long
// branch / address load, long branch-and-link via mov(t8, ra), or raw j/jal.
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    // Internal reference: store the absolute address of the target directly.
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    // Ordinary branch: rewrite its offset field in place.
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      // lui/nal/... long branch-and-link sequence: the lui and the ori two
      // slots later hold the 32-bit pc-relative offset.
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to regular branch and link with 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after jalr from
        // TurboAssembler::BranchAndLinkLong.
        Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        // Keep the long form: split the 32-bit offset over lui/ori.
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;

        instr_at_put(pos + 0 * kInstrSize,
                     instr_lui | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
      }
    } else {
      // lui/ori/.../ori sequence loading an absolute address: patch the
      // upper, middle and lower 16-bit chunks (slots 0, 1 and 3).
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;
      instr_ori2 &= ~kImm16Mask;

      instr_at_put(pos + 0 * kInstrSize,
                   instr_lui | ((imm >> 32) & kImm16Mask));
      instr_at_put(pos + 1 * kInstrSize,
                   instr_ori | ((imm >> 16) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
    }
  } else if (IsMov(instr, t8, ra)) {
    // Long branch sequence starting with mov(t8, ra); the lui/ori pair at
    // slots 2 and 3 carries the pc-relative offset.
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to regular branch with 16-bit
      // offset
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      // Reuse the sequence's original delay-slot instruction behind the
      // shortened branch.
      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        instr_branch_delay = instr_at(pos + 6 * kInstrSize);
      } else {
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      instr_at_put(pos + 2 * kInstrSize,
                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
    }
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
1002 
print(const Label * L)1003 void Assembler::print(const Label* L) {
1004   if (L->is_unused()) {
1005     PrintF("unused label\n");
1006   } else if (L->is_bound()) {
1007     PrintF("bound label to %d\n", L->pos());
1008   } else if (L->is_linked()) {
1009     Label l;
1010     l.link_to(L->pos());
1011     PrintF("unbound label");
1012     while (l.is_linked()) {
1013       PrintF("@ %d ", l.pos());
1014       Instr instr = instr_at(l.pos());
1015       if ((instr & ~kImm16Mask) == 0) {
1016         PrintF("value\n");
1017       } else {
1018         PrintF("%d\n", instr);
1019       }
1020       next(&l, is_internal_reference(&l));
1021     }
1022   } else {
1023     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
1024   }
1025 }
1026 
1027 
// Binds label |L| to |pos|, resolving every fix-up on its link chain.
// Branches whose encoded offset cannot reach |pos| are routed through a
// trampoline slot instead.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  // Once bound, the label no longer contributes to trampoline-pool pressure;
  // undo the bookkeeping done when it was first linked.
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  // Walk the chain of linked uses and patch each one to point at |pos|.
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Target is out of range for this branch: retarget the branch at a
          // trampoline slot, which in turn jumps to the real target.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr) || IsMov(instr, t8, ra));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
1075 
1076 
// Binds an as-yet-unbound label to the current pc offset.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
1081 
1082 
next(Label * L,bool is_internal)1083 void Assembler::next(Label* L, bool is_internal) {
1084   DCHECK(L->is_linked());
1085   int link = target_at(L->pos(), is_internal);
1086   if (link == kEndOfChain) {
1087     L->Unuse();
1088   } else {
1089     DCHECK_GE(link, 0);
1090     L->link_to(link);
1091   }
1092 }
1093 
1094 
// Returns true when a short branch from the current pc can reach bound label
// |L|, leaving a few instructions of slack.
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
1099 
1100 
is_near(Label * L,OffsetSize bits)1101 bool Assembler::is_near(Label* L, OffsetSize bits) {
1102   if (L == nullptr || !L->is_bound()) return true;
1103   return ((pc_offset() - L->pos()) <
1104           (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
1105 }
1106 
1107 
// Returns true when a branch can reach bound label |L|, dispatching on the
// architecture variant (R6 vs. pre-R6 reach rules).
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
1112 
1113 
BranchOffset(Instr instr)1114 int Assembler::BranchOffset(Instr instr) {
1115   // At pre-R6 and for other R6 branches the offset is 16 bits.
1116   int bits = OffsetSize::kOffset16;
1117 
1118   if (kArchVariant == kMips64r6) {
1119     uint32_t opcode = GetOpcodeField(instr);
1120     switch (opcode) {
1121       // Checks BC or BALC.
1122       case BC:
1123       case BALC:
1124         bits = OffsetSize::kOffset26;
1125         break;
1126 
1127       // Checks BEQZC or BNEZC.
1128       case POP66:
1129       case POP76:
1130         if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
1131         break;
1132       default:
1133         break;
1134     }
1135   }
1136 
1137   return (1 << (bits + 2 - 1)) - 1;
1138 }
1139 
1140 
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Any non-none reloc mode may move, so force the register form.
  return !RelocInfo::IsNone(rmode);
}
1148 
GenInstrRegister(Opcode opcode,Register rs,Register rt,Register rd,uint16_t sa,SecondaryField func)1149 void Assembler::GenInstrRegister(Opcode opcode,
1150                                  Register rs,
1151                                  Register rt,
1152                                  Register rd,
1153                                  uint16_t sa,
1154                                  SecondaryField func) {
1155   DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
1156   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1157       | (rd.code() << kRdShift) | (sa << kSaShift) | func;
1158   emit(instr);
1159 }
1160 
1161 
GenInstrRegister(Opcode opcode,Register rs,Register rt,uint16_t msb,uint16_t lsb,SecondaryField func)1162 void Assembler::GenInstrRegister(Opcode opcode,
1163                                  Register rs,
1164                                  Register rt,
1165                                  uint16_t msb,
1166                                  uint16_t lsb,
1167                                  SecondaryField func) {
1168   DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
1169   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1170       | (msb << kRdShift) | (lsb << kSaShift) | func;
1171   emit(instr);
1172 }
1173 
1174 
GenInstrRegister(Opcode opcode,SecondaryField fmt,FPURegister ft,FPURegister fs,FPURegister fd,SecondaryField func)1175 void Assembler::GenInstrRegister(Opcode opcode,
1176                                  SecondaryField fmt,
1177                                  FPURegister ft,
1178                                  FPURegister fs,
1179                                  FPURegister fd,
1180                                  SecondaryField func) {
1181   DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
1182   Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
1183       | (fd.code() << kFdShift) | func;
1184   emit(instr);
1185 }
1186 
1187 
GenInstrRegister(Opcode opcode,FPURegister fr,FPURegister ft,FPURegister fs,FPURegister fd,SecondaryField func)1188 void Assembler::GenInstrRegister(Opcode opcode,
1189                                  FPURegister fr,
1190                                  FPURegister ft,
1191                                  FPURegister fs,
1192                                  FPURegister fd,
1193                                  SecondaryField func) {
1194   DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
1195   Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
1196       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
1197   emit(instr);
1198 }
1199 
1200 
GenInstrRegister(Opcode opcode,SecondaryField fmt,Register rt,FPURegister fs,FPURegister fd,SecondaryField func)1201 void Assembler::GenInstrRegister(Opcode opcode,
1202                                  SecondaryField fmt,
1203                                  Register rt,
1204                                  FPURegister fs,
1205                                  FPURegister fd,
1206                                  SecondaryField func) {
1207   DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
1208   Instr instr = opcode | fmt | (rt.code() << kRtShift)
1209       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
1210   emit(instr);
1211 }
1212 
1213 
GenInstrRegister(Opcode opcode,SecondaryField fmt,Register rt,FPUControlRegister fs,SecondaryField func)1214 void Assembler::GenInstrRegister(Opcode opcode,
1215                                  SecondaryField fmt,
1216                                  Register rt,
1217                                  FPUControlRegister fs,
1218                                  SecondaryField func) {
1219   DCHECK(fs.is_valid() && rt.is_valid());
1220   Instr instr =
1221       opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
1222   emit(instr);
1223 }
1224 
1225 
1226 // Instructions with immediate value.
1227 // Registers are in the order of the instruction encoding, from left to right.
GenInstrImmediate(Opcode opcode,Register rs,Register rt,int32_t j,CompactBranchType is_compact_branch)1228 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
1229                                   int32_t j,
1230                                   CompactBranchType is_compact_branch) {
1231   DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
1232   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1233       | (j & kImm16Mask);
1234   emit(instr, is_compact_branch);
1235 }
1236 
GenInstrImmediate(Opcode opcode,Register base,Register rt,int32_t offset9,int bit6,SecondaryField func)1237 void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
1238                                   int32_t offset9, int bit6,
1239                                   SecondaryField func) {
1240   DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
1241          is_uint1(bit6));
1242   Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
1243                 ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
1244                 func;
1245   emit(instr);
1246 }
1247 
GenInstrImmediate(Opcode opcode,Register rs,SecondaryField SF,int32_t j,CompactBranchType is_compact_branch)1248 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
1249                                   int32_t j,
1250                                   CompactBranchType is_compact_branch) {
1251   DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
1252   Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
1253   emit(instr, is_compact_branch);
1254 }
1255 
1256 
GenInstrImmediate(Opcode opcode,Register rs,FPURegister ft,int32_t j,CompactBranchType is_compact_branch)1257 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
1258                                   int32_t j,
1259                                   CompactBranchType is_compact_branch) {
1260   DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
1261   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
1262       | (j & kImm16Mask);
1263   emit(instr, is_compact_branch);
1264 }
1265 
1266 
GenInstrImmediate(Opcode opcode,Register rs,int32_t offset21,CompactBranchType is_compact_branch)1267 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
1268                                   CompactBranchType is_compact_branch) {
1269   DCHECK(rs.is_valid() && (is_int21(offset21)));
1270   Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1271   emit(instr, is_compact_branch);
1272 }
1273 
1274 
GenInstrImmediate(Opcode opcode,Register rs,uint32_t offset21)1275 void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
1276                                   uint32_t offset21) {
1277   DCHECK(rs.is_valid() && (is_uint21(offset21)));
1278   Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1279   emit(instr);
1280 }
1281 
1282 
GenInstrImmediate(Opcode opcode,int32_t offset26,CompactBranchType is_compact_branch)1283 void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
1284                                   CompactBranchType is_compact_branch) {
1285   DCHECK(is_int26(offset26));
1286   Instr instr = opcode | (offset26 & kImm26Mask);
1287   emit(instr, is_compact_branch);
1288 }
1289 
1290 
// Emits a J-format jump with a 26-bit target field. The trampoline pool is
// blocked for one extra instruction so a pool cannot be emitted into the
// jump's delay slot.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1299 
1300 // MSA instructions
GenInstrMsaI8(SecondaryField operation,uint32_t imm8,MSARegister ws,MSARegister wd)1301 void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
1302                               MSARegister ws, MSARegister wd) {
1303   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1304   DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
1305   Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
1306                 (ws.code() << kWsShift) | (wd.code() << kWdShift);
1307   emit(instr);
1308 }
1309 
// Emits an MSA I5-format instruction (5-bit immediate in the wt field).
// The immediate is signed for the signed compare/min/max operations listed
// below and unsigned for everything else, which the conditional DCHECK
// encodes.
void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}
1323 
GenInstrMsaBit(SecondaryField operation,SecondaryField df,uint32_t m,MSARegister ws,MSARegister wd)1324 void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
1325                                uint32_t m, MSARegister ws, MSARegister wd) {
1326   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1327   DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
1328   Instr instr = MSA | operation | df | (m << kWtShift) |
1329                 (ws.code() << kWsShift) | (wd.code() << kWdShift);
1330   emit(instr);
1331 }
1332 
GenInstrMsaI10(SecondaryField operation,SecondaryField df,int32_t imm10,MSARegister wd)1333 void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
1334                                int32_t imm10, MSARegister wd) {
1335   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1336   DCHECK(wd.is_valid() && is_int10(imm10));
1337   Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
1338                 (wd.code() << kWdShift);
1339   emit(instr);
1340 }
1341 
1342 template <typename RegType>
GenInstrMsa3R(SecondaryField operation,SecondaryField df,RegType t,MSARegister ws,MSARegister wd)1343 void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
1344                               RegType t, MSARegister ws, MSARegister wd) {
1345   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1346   DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
1347   Instr instr = MSA | operation | df | (t.code() << kWtShift) |
1348                 (ws.code() << kWsShift) | (wd.code() << kWdShift);
1349   emit(instr);
1350 }
1351 
1352 template <typename DstType, typename SrcType>
GenInstrMsaElm(SecondaryField operation,SecondaryField df,uint32_t n,SrcType src,DstType dst)1353 void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
1354                                uint32_t n, SrcType src, DstType dst) {
1355   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1356   DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
1357   Instr instr = MSA | operation | df | (n << kWtShift) |
1358                 (src.code() << kWsShift) | (dst.code() << kWdShift) |
1359                 MSA_ELM_MINOR;
1360   emit(instr);
1361 }
1362 
GenInstrMsa3RF(SecondaryField operation,uint32_t df,MSARegister wt,MSARegister ws,MSARegister wd)1363 void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
1364                                MSARegister wt, MSARegister ws, MSARegister wd) {
1365   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1366   DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
1367   DCHECK_LT(df, 2);
1368   Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
1369                 (ws.code() << kWsShift) | (wd.code() << kWdShift);
1370   emit(instr);
1371 }
1372 
GenInstrMsaVec(SecondaryField operation,MSARegister wt,MSARegister ws,MSARegister wd)1373 void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
1374                                MSARegister ws, MSARegister wd) {
1375   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1376   DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
1377   Instr instr = MSA | operation | (wt.code() << kWtShift) |
1378                 (ws.code() << kWsShift) | (wd.code() << kWdShift) |
1379                 MSA_VEC_2R_2RF_MINOR;
1380   emit(instr);
1381 }
1382 
GenInstrMsaMI10(SecondaryField operation,int32_t s10,Register rs,MSARegister wd)1383 void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
1384                                 Register rs, MSARegister wd) {
1385   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1386   DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
1387   Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
1388                 (rs.code() << kWsShift) | (wd.code() << kWdShift);
1389   emit(instr);
1390 }
1391 
GenInstrMsa2R(SecondaryField operation,SecondaryField df,MSARegister ws,MSARegister wd)1392 void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
1393                               MSARegister ws, MSARegister wd) {
1394   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1395   DCHECK(ws.is_valid() && wd.is_valid());
1396   Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
1397                 (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
1398   emit(instr);
1399 }
1400 
GenInstrMsa2RF(SecondaryField operation,SecondaryField df,MSARegister ws,MSARegister wd)1401 void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
1402                                MSARegister ws, MSARegister wd) {
1403   DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
1404   DCHECK(ws.is_valid() && wd.is_valid());
1405   Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
1406                 (ws.code() << kWsShift) | (wd.code() << kWdShift) |
1407                 MSA_VEC_2R_2RF_MINOR;
1408   emit(instr);
1409 }
1410 
// Emits an MSA branch (COP1-encoded) with a 16-bit offset. The trampoline
// pool is blocked for one extra instruction so a pool cannot be emitted into
// the branch's delay slot.
void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1421 
1422 // Returns the next free trampoline entry.
get_trampoline_entry(int32_t pos)1423 int32_t Assembler::get_trampoline_entry(int32_t pos) {
1424   int32_t trampoline_entry = kInvalidSlotPos;
1425   if (!internal_trampoline_exception_) {
1426     if (trampoline_.start() > pos) {
1427      trampoline_entry = trampoline_.take_slot();
1428     }
1429 
1430     if (kInvalidSlotPos == trampoline_entry) {
1431       internal_trampoline_exception_ = true;
1432     }
1433   }
1434   return trampoline_entry;
1435 }
1436 
1437 
jump_address(Label * L)1438 uint64_t Assembler::jump_address(Label* L) {
1439   int64_t target_pos;
1440   if (L->is_bound()) {
1441     target_pos = L->pos();
1442   } else {
1443     if (L->is_linked()) {
1444       target_pos = L->pos();  // L's link.
1445       L->link_to(pc_offset());
1446     } else {
1447       L->link_to(pc_offset());
1448       return kEndOfJumpChain;
1449     }
1450   }
1451   uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
1452   DCHECK_EQ(imm & 3, 0);
1453 
1454   return imm;
1455 }
1456 
1457 
jump_offset(Label * L)1458 uint64_t Assembler::jump_offset(Label* L) {
1459   int64_t target_pos;
1460   int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1461 
1462   if (L->is_bound()) {
1463     target_pos = L->pos();
1464   } else {
1465     if (L->is_linked()) {
1466       target_pos = L->pos();  // L's link.
1467       L->link_to(pc_offset() + pad);
1468     } else {
1469       L->link_to(pc_offset() + pad);
1470       return kEndOfJumpChain;
1471     }
1472   }
1473   int64_t imm = target_pos - (pc_offset() + pad);
1474   DCHECK_EQ(imm & 3, 0);
1475 
1476   return static_cast<uint64_t>(imm);
1477 }
1478 
branch_long_offset(Label * L)1479 uint64_t Assembler::branch_long_offset(Label* L) {
1480   int64_t target_pos;
1481 
1482   if (L->is_bound()) {
1483     target_pos = L->pos();
1484   } else {
1485     if (L->is_linked()) {
1486       target_pos = L->pos();  // L's link.
1487       L->link_to(pc_offset());
1488     } else {
1489       L->link_to(pc_offset());
1490       return kEndOfJumpChain;
1491     }
1492   }
1493   int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
1494   DCHECK_EQ(offset & 3, 0);
1495 
1496   return static_cast<uint64_t>(offset);
1497 }
1498 
branch_offset_helper(Label * L,OffsetSize bits)1499 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
1500   int32_t target_pos;
1501   int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1502 
1503   if (L->is_bound()) {
1504     target_pos = L->pos();
1505   } else {
1506     if (L->is_linked()) {
1507       target_pos = L->pos();
1508       L->link_to(pc_offset() + pad);
1509     } else {
1510       L->link_to(pc_offset() + pad);
1511       if (!trampoline_emitted_) {
1512         unbound_labels_count_++;
1513         next_buffer_check_ -= kTrampolineSlotsSize;
1514       }
1515       return kEndOfChain;
1516     }
1517   }
1518 
1519   int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1520   DCHECK(is_intn(offset, bits + 2));
1521   DCHECK_EQ(offset & 3, 0);
1522 
1523   return offset;
1524 }
1525 
1526 
// Writes label |L|'s position into the instruction slot at |at_offset|, or
// links the slot into L's chain if L is not yet bound.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    // Store the position relative to the start of the Code object's body.
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      // Encode the distance to the previous link as a 16-bit word count.
      int32_t imm18 = target_pos - at_offset;
      DCHECK_EQ(imm18 & 3, 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      // First use of the label: a zero word marks the end of the chain.
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        // Another unbound label may need a trampoline slot later.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1551 
1552 
1553 //------- Branch and jump instructions --------
1554 
//------- Branch and jump instructions --------

// Unconditional branch (pseudo-op): encoded as beq zero_reg, zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}
1558 
1559 
// Unconditional branch-and-link (pseudo-op): encoded as bgezal zero_reg.
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}
1563 
1564 
// Compact unconditional branch (R6 only), 26-bit offset, no delay slot.
void Assembler::bc(int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
1569 
1570 
// Compact branch-and-link (R6 only), 26-bit offset, no delay slot.
void Assembler::balc(int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
1575 
1576 
// Branch if rs == rt. The trampoline pool is blocked so no pool can be
// emitted into the branch's delay slot.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1582 
1583 
// Branch if rs >= 0 (REGIMM encoding); delay slot protected from the
// trampoline pool.
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1589 
1590 
// Compact branch if rt >= 0 (R6 only): encoded in the BLEZL opcode space
// with rs == rt.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1596 
1597 
// Compact branch if rs >= rt, unsigned (R6 only): encoded in the BLEZ opcode
// space; both registers must be nonzero and distinct to select this form.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1605 
1606 
// Compact branch if rs >= rt, signed (R6 only): encoded in the BLEZL opcode
// space; both registers must be nonzero and distinct to select this form.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1614 
1615 
// Branch-and-link if rs >= 0 (REGIMM encoding). On R6 only the bal form
// (rs == zero_reg) remains valid, hence the first DCHECK; rs must not be ra
// since the instruction clobbers ra with the return address.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1623 
1624 
// Branch if rs > 0; delay slot protected from the trampoline pool.
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1630 
1631 
// Compact branch if rt > 0 (R6 only): encoded in the BGTZL opcode space with
// rs == zero_reg.
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1638 
1639 
blez(Register rs,int16_t offset)1640 void Assembler::blez(Register rs, int16_t offset) {
1641   BlockTrampolinePoolScope block_trampoline_pool(this);
1642   GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1643   BlockTrampolinePoolFor(1);  // For associated delay slot.
1644 }
1645 
1646 
blezc(Register rt,int16_t offset)1647 void Assembler::blezc(Register rt, int16_t offset) {
1648   DCHECK_EQ(kArchVariant, kMips64r6);
1649   DCHECK(rt != zero_reg);
1650   GenInstrImmediate(BLEZL, zero_reg, rt, offset,
1651                     CompactBranchType::COMPACT_BRANCH);
1652 }
1653 
1654 
bltzc(Register rt,int16_t offset)1655 void Assembler::bltzc(Register rt, int16_t offset) {
1656   DCHECK_EQ(kArchVariant, kMips64r6);
1657   DCHECK(rt != zero_reg);
1658   GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1659 }
1660 
1661 
bltuc(Register rs,Register rt,int16_t offset)1662 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1663   DCHECK_EQ(kArchVariant, kMips64r6);
1664   DCHECK(rs != zero_reg);
1665   DCHECK(rt != zero_reg);
1666   DCHECK(rs.code() != rt.code());
1667   GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1668 }
1669 
1670 
bltc(Register rs,Register rt,int16_t offset)1671 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1672   DCHECK_EQ(kArchVariant, kMips64r6);
1673   DCHECK(rs != zero_reg);
1674   DCHECK(rt != zero_reg);
1675   DCHECK(rs.code() != rt.code());
1676   GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1677 }
1678 
1679 
bltz(Register rs,int16_t offset)1680 void Assembler::bltz(Register rs, int16_t offset) {
1681   BlockTrampolinePoolScope block_trampoline_pool(this);
1682   GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1683   BlockTrampolinePoolFor(1);  // For associated delay slot.
1684 }
1685 
1686 
bltzal(Register rs,int16_t offset)1687 void Assembler::bltzal(Register rs, int16_t offset) {
1688   DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
1689   DCHECK(rs != ra);
1690   BlockTrampolinePoolScope block_trampoline_pool(this);
1691   GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1692   BlockTrampolinePoolFor(1);  // For associated delay slot.
1693 }
1694 
1695 
bne(Register rs,Register rt,int16_t offset)1696 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1697   BlockTrampolinePoolScope block_trampoline_pool(this);
1698   GenInstrImmediate(BNE, rs, rt, offset);
1699   BlockTrampolinePoolFor(1);  // For associated delay slot.
1700 }
1701 
1702 
bovc(Register rs,Register rt,int16_t offset)1703 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1704   DCHECK_EQ(kArchVariant, kMips64r6);
1705   if (rs.code() >= rt.code()) {
1706     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1707   } else {
1708     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1709   }
1710 }
1711 
1712 
bnvc(Register rs,Register rt,int16_t offset)1713 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1714   DCHECK_EQ(kArchVariant, kMips64r6);
1715   if (rs.code() >= rt.code()) {
1716     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1717   } else {
1718     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1719   }
1720 }
1721 
1722 
blezalc(Register rt,int16_t offset)1723 void Assembler::blezalc(Register rt, int16_t offset) {
1724   DCHECK_EQ(kArchVariant, kMips64r6);
1725   DCHECK(rt != zero_reg);
1726   DCHECK(rt != ra);
1727   GenInstrImmediate(BLEZ, zero_reg, rt, offset,
1728                     CompactBranchType::COMPACT_BRANCH);
1729 }
1730 
1731 
bgezalc(Register rt,int16_t offset)1732 void Assembler::bgezalc(Register rt, int16_t offset) {
1733   DCHECK_EQ(kArchVariant, kMips64r6);
1734   DCHECK(rt != zero_reg);
1735   DCHECK(rt != ra);
1736   GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1737 }
1738 
1739 
bgezall(Register rs,int16_t offset)1740 void Assembler::bgezall(Register rs, int16_t offset) {
1741   DCHECK_NE(kArchVariant, kMips64r6);
1742   DCHECK(rs != zero_reg);
1743   DCHECK(rs != ra);
1744   BlockTrampolinePoolScope block_trampoline_pool(this);
1745   GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
1746   BlockTrampolinePoolFor(1);  // For associated delay slot.
1747 }
1748 
1749 
bltzalc(Register rt,int16_t offset)1750 void Assembler::bltzalc(Register rt, int16_t offset) {
1751   DCHECK_EQ(kArchVariant, kMips64r6);
1752   DCHECK(rt != zero_reg);
1753   DCHECK(rt != ra);
1754   GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1755 }
1756 
1757 
bgtzalc(Register rt,int16_t offset)1758 void Assembler::bgtzalc(Register rt, int16_t offset) {
1759   DCHECK_EQ(kArchVariant, kMips64r6);
1760   DCHECK(rt != zero_reg);
1761   DCHECK(rt != ra);
1762   GenInstrImmediate(BGTZ, zero_reg, rt, offset,
1763                     CompactBranchType::COMPACT_BRANCH);
1764 }
1765 
1766 
beqzalc(Register rt,int16_t offset)1767 void Assembler::beqzalc(Register rt, int16_t offset) {
1768   DCHECK_EQ(kArchVariant, kMips64r6);
1769   DCHECK(rt != zero_reg);
1770   DCHECK(rt != ra);
1771   GenInstrImmediate(ADDI, zero_reg, rt, offset,
1772                     CompactBranchType::COMPACT_BRANCH);
1773 }
1774 
1775 
bnezalc(Register rt,int16_t offset)1776 void Assembler::bnezalc(Register rt, int16_t offset) {
1777   DCHECK_EQ(kArchVariant, kMips64r6);
1778   DCHECK(rt != zero_reg);
1779   DCHECK(rt != ra);
1780   GenInstrImmediate(DADDI, zero_reg, rt, offset,
1781                     CompactBranchType::COMPACT_BRANCH);
1782 }
1783 
1784 
beqc(Register rs,Register rt,int16_t offset)1785 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1786   DCHECK_EQ(kArchVariant, kMips64r6);
1787   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1788   if (rs.code() < rt.code()) {
1789     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1790   } else {
1791     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1792   }
1793 }
1794 
1795 
beqzc(Register rs,int32_t offset)1796 void Assembler::beqzc(Register rs, int32_t offset) {
1797   DCHECK_EQ(kArchVariant, kMips64r6);
1798   DCHECK(rs != zero_reg);
1799   GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
1800 }
1801 
1802 
bnec(Register rs,Register rt,int16_t offset)1803 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1804   DCHECK_EQ(kArchVariant, kMips64r6);
1805   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1806   if (rs.code() < rt.code()) {
1807     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1808   } else {
1809     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1810   }
1811 }
1812 
1813 
bnezc(Register rs,int32_t offset)1814 void Assembler::bnezc(Register rs, int32_t offset) {
1815   DCHECK_EQ(kArchVariant, kMips64r6);
1816   DCHECK(rs != zero_reg);
1817   GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
1818 }
1819 
1820 
j(int64_t target)1821 void Assembler::j(int64_t target) {
1822   BlockTrampolinePoolScope block_trampoline_pool(this);
1823   GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
1824   BlockTrampolinePoolFor(1);  // For associated delay slot.
1825 }
1826 
1827 
j(Label * target)1828 void Assembler::j(Label* target) {
1829   uint64_t imm = jump_offset(target);
1830   if (target->is_bound()) {
1831     BlockTrampolinePoolScope block_trampoline_pool(this);
1832     GenInstrJump(static_cast<Opcode>(kJRawMark),
1833                  static_cast<uint32_t>(imm >> 2) & kImm26Mask);
1834     BlockTrampolinePoolFor(1);  // For associated delay slot.
1835   } else {
1836     j(imm);
1837   }
1838 }
1839 
1840 
jal(Label * target)1841 void Assembler::jal(Label* target) {
1842   uint64_t imm = jump_offset(target);
1843   if (target->is_bound()) {
1844     BlockTrampolinePoolScope block_trampoline_pool(this);
1845     GenInstrJump(static_cast<Opcode>(kJalRawMark),
1846                  static_cast<uint32_t>(imm >> 2) & kImm26Mask);
1847     BlockTrampolinePoolFor(1);  // For associated delay slot.
1848   } else {
1849     jal(imm);
1850   }
1851 }
1852 
1853 
jr(Register rs)1854 void Assembler::jr(Register rs) {
1855   if (kArchVariant != kMips64r6) {
1856     BlockTrampolinePoolScope block_trampoline_pool(this);
1857     GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1858     BlockTrampolinePoolFor(1);  // For associated delay slot.
1859   } else {
1860     jalr(rs, zero_reg);
1861   }
1862 }
1863 
1864 
jal(int64_t target)1865 void Assembler::jal(int64_t target) {
1866   BlockTrampolinePoolScope block_trampoline_pool(this);
1867   GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
1868   BlockTrampolinePoolFor(1);  // For associated delay slot.
1869 }
1870 
1871 
jalr(Register rs,Register rd)1872 void Assembler::jalr(Register rs, Register rd) {
1873   DCHECK(rs.code() != rd.code());
1874   BlockTrampolinePoolScope block_trampoline_pool(this);
1875   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1876   BlockTrampolinePoolFor(1);  // For associated delay slot.
1877 }
1878 
1879 
jic(Register rt,int16_t offset)1880 void Assembler::jic(Register rt, int16_t offset) {
1881   DCHECK_EQ(kArchVariant, kMips64r6);
1882   GenInstrImmediate(POP66, zero_reg, rt, offset);
1883 }
1884 
1885 
jialc(Register rt,int16_t offset)1886 void Assembler::jialc(Register rt, int16_t offset) {
1887   DCHECK_EQ(kArchVariant, kMips64r6);
1888   GenInstrImmediate(POP76, zero_reg, rt, offset);
1889 }
1890 
1891 
1892 // -------Data-processing-instructions---------
1893 
1894 // Arithmetic.
1895 
addu(Register rd,Register rs,Register rt)1896 void Assembler::addu(Register rd, Register rs, Register rt) {
1897   GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1898 }
1899 
1900 
addiu(Register rd,Register rs,int32_t j)1901 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1902   GenInstrImmediate(ADDIU, rs, rd, j);
1903 }
1904 
1905 
subu(Register rd,Register rs,Register rt)1906 void Assembler::subu(Register rd, Register rs, Register rt) {
1907   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1908 }
1909 
1910 
mul(Register rd,Register rs,Register rt)1911 void Assembler::mul(Register rd, Register rs, Register rt) {
1912   if (kArchVariant == kMips64r6) {
1913       GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1914   } else {
1915       GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1916   }
1917 }
1918 
1919 
muh(Register rd,Register rs,Register rt)1920 void Assembler::muh(Register rd, Register rs, Register rt) {
1921   DCHECK_EQ(kArchVariant, kMips64r6);
1922   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1923 }
1924 
1925 
mulu(Register rd,Register rs,Register rt)1926 void Assembler::mulu(Register rd, Register rs, Register rt) {
1927   DCHECK_EQ(kArchVariant, kMips64r6);
1928   GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1929 }
1930 
1931 
muhu(Register rd,Register rs,Register rt)1932 void Assembler::muhu(Register rd, Register rs, Register rt) {
1933   DCHECK_EQ(kArchVariant, kMips64r6);
1934   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1935 }
1936 
1937 
dmul(Register rd,Register rs,Register rt)1938 void Assembler::dmul(Register rd, Register rs, Register rt) {
1939   DCHECK_EQ(kArchVariant, kMips64r6);
1940   GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
1941 }
1942 
1943 
dmuh(Register rd,Register rs,Register rt)1944 void Assembler::dmuh(Register rd, Register rs, Register rt) {
1945   DCHECK_EQ(kArchVariant, kMips64r6);
1946   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
1947 }
1948 
1949 
dmulu(Register rd,Register rs,Register rt)1950 void Assembler::dmulu(Register rd, Register rs, Register rt) {
1951   DCHECK_EQ(kArchVariant, kMips64r6);
1952   GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
1953 }
1954 
1955 
dmuhu(Register rd,Register rs,Register rt)1956 void Assembler::dmuhu(Register rd, Register rs, Register rt) {
1957   DCHECK_EQ(kArchVariant, kMips64r6);
1958   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
1959 }
1960 
1961 
mult(Register rs,Register rt)1962 void Assembler::mult(Register rs, Register rt) {
1963   DCHECK_NE(kArchVariant, kMips64r6);
1964   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1965 }
1966 
1967 
multu(Register rs,Register rt)1968 void Assembler::multu(Register rs, Register rt) {
1969   DCHECK_NE(kArchVariant, kMips64r6);
1970   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1971 }
1972 
1973 
daddiu(Register rd,Register rs,int32_t j)1974 void Assembler::daddiu(Register rd, Register rs, int32_t j) {
1975   GenInstrImmediate(DADDIU, rs, rd, j);
1976 }
1977 
1978 
div(Register rs,Register rt)1979 void Assembler::div(Register rs, Register rt) {
1980   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1981 }
1982 
1983 
div(Register rd,Register rs,Register rt)1984 void Assembler::div(Register rd, Register rs, Register rt) {
1985   DCHECK_EQ(kArchVariant, kMips64r6);
1986   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1987 }
1988 
1989 
mod(Register rd,Register rs,Register rt)1990 void Assembler::mod(Register rd, Register rs, Register rt) {
1991   DCHECK_EQ(kArchVariant, kMips64r6);
1992   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1993 }
1994 
1995 
divu(Register rs,Register rt)1996 void Assembler::divu(Register rs, Register rt) {
1997   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1998 }
1999 
2000 
divu(Register rd,Register rs,Register rt)2001 void Assembler::divu(Register rd, Register rs, Register rt) {
2002   DCHECK_EQ(kArchVariant, kMips64r6);
2003   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
2004 }
2005 
2006 
modu(Register rd,Register rs,Register rt)2007 void Assembler::modu(Register rd, Register rs, Register rt) {
2008   DCHECK_EQ(kArchVariant, kMips64r6);
2009   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
2010 }
2011 
2012 
daddu(Register rd,Register rs,Register rt)2013 void Assembler::daddu(Register rd, Register rs, Register rt) {
2014   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
2015 }
2016 
2017 
dsubu(Register rd,Register rs,Register rt)2018 void Assembler::dsubu(Register rd, Register rs, Register rt) {
2019   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
2020 }
2021 
2022 
dmult(Register rs,Register rt)2023 void Assembler::dmult(Register rs, Register rt) {
2024   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
2025 }
2026 
2027 
dmultu(Register rs,Register rt)2028 void Assembler::dmultu(Register rs, Register rt) {
2029   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
2030 }
2031 
2032 
ddiv(Register rs,Register rt)2033 void Assembler::ddiv(Register rs, Register rt) {
2034   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
2035 }
2036 
2037 
ddiv(Register rd,Register rs,Register rt)2038 void Assembler::ddiv(Register rd, Register rs, Register rt) {
2039   DCHECK_EQ(kArchVariant, kMips64r6);
2040   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
2041 }
2042 
2043 
dmod(Register rd,Register rs,Register rt)2044 void Assembler::dmod(Register rd, Register rs, Register rt) {
2045   DCHECK_EQ(kArchVariant, kMips64r6);
2046   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
2047 }
2048 
2049 
ddivu(Register rs,Register rt)2050 void Assembler::ddivu(Register rs, Register rt) {
2051   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
2052 }
2053 
2054 
ddivu(Register rd,Register rs,Register rt)2055 void Assembler::ddivu(Register rd, Register rs, Register rt) {
2056   DCHECK_EQ(kArchVariant, kMips64r6);
2057   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
2058 }
2059 
2060 
dmodu(Register rd,Register rs,Register rt)2061 void Assembler::dmodu(Register rd, Register rs, Register rt) {
2062   DCHECK_EQ(kArchVariant, kMips64r6);
2063   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
2064 }
2065 
2066 
2067 // Logical.
2068 
and_(Register rd,Register rs,Register rt)2069 void Assembler::and_(Register rd, Register rs, Register rt) {
2070   GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
2071 }
2072 
2073 
andi(Register rt,Register rs,int32_t j)2074 void Assembler::andi(Register rt, Register rs, int32_t j) {
2075   DCHECK(is_uint16(j));
2076   GenInstrImmediate(ANDI, rs, rt, j);
2077 }
2078 
2079 
or_(Register rd,Register rs,Register rt)2080 void Assembler::or_(Register rd, Register rs, Register rt) {
2081   GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
2082 }
2083 
2084 
ori(Register rt,Register rs,int32_t j)2085 void Assembler::ori(Register rt, Register rs, int32_t j) {
2086   DCHECK(is_uint16(j));
2087   GenInstrImmediate(ORI, rs, rt, j);
2088 }
2089 
2090 
xor_(Register rd,Register rs,Register rt)2091 void Assembler::xor_(Register rd, Register rs, Register rt) {
2092   GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
2093 }
2094 
2095 
xori(Register rt,Register rs,int32_t j)2096 void Assembler::xori(Register rt, Register rs, int32_t j) {
2097   DCHECK(is_uint16(j));
2098   GenInstrImmediate(XORI, rs, rt, j);
2099 }
2100 
2101 
nor(Register rd,Register rs,Register rt)2102 void Assembler::nor(Register rd, Register rs, Register rt) {
2103   GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
2104 }
2105 
2106 
2107 // Shifts.
sll(Register rd,Register rt,uint16_t sa,bool coming_from_nop)2108 void Assembler::sll(Register rd,
2109                     Register rt,
2110                     uint16_t sa,
2111                     bool coming_from_nop) {
2112   // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
2113   // generated using the sll instruction. They must be generated using
2114   // nop(int/NopMarkerTypes).
2115   DCHECK(coming_from_nop || (rd != zero_reg && rt != zero_reg));
2116   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
2117 }
2118 
2119 
sllv(Register rd,Register rt,Register rs)2120 void Assembler::sllv(Register rd, Register rt, Register rs) {
2121   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
2122 }
2123 
2124 
srl(Register rd,Register rt,uint16_t sa)2125 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
2126   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
2127 }
2128 
2129 
srlv(Register rd,Register rt,Register rs)2130 void Assembler::srlv(Register rd, Register rt, Register rs) {
2131   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
2132 }
2133 
2134 
sra(Register rd,Register rt,uint16_t sa)2135 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
2136   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
2137 }
2138 
2139 
srav(Register rd,Register rt,Register rs)2140 void Assembler::srav(Register rd, Register rt, Register rs) {
2141   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
2142 }
2143 
2144 
rotr(Register rd,Register rt,uint16_t sa)2145 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
2146   // Should be called via MacroAssembler::Ror.
2147   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2148   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2149   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
2150       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
2151   emit(instr);
2152 }
2153 
2154 
rotrv(Register rd,Register rt,Register rs)2155 void Assembler::rotrv(Register rd, Register rt, Register rs) {
2156   // Should be called via MacroAssembler::Ror.
2157   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2158   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2159   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
2160      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
2161   emit(instr);
2162 }
2163 
2164 
dsll(Register rd,Register rt,uint16_t sa)2165 void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
2166   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
2167 }
2168 
2169 
dsllv(Register rd,Register rt,Register rs)2170 void Assembler::dsllv(Register rd, Register rt, Register rs) {
2171   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
2172 }
2173 
2174 
dsrl(Register rd,Register rt,uint16_t sa)2175 void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
2176   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
2177 }
2178 
2179 
dsrlv(Register rd,Register rt,Register rs)2180 void Assembler::dsrlv(Register rd, Register rt, Register rs) {
2181   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
2182 }
2183 
2184 
drotr(Register rd,Register rt,uint16_t sa)2185 void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
2186   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2187   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
2188       | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
2189   emit(instr);
2190 }
2191 
drotr32(Register rd,Register rt,uint16_t sa)2192 void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
2193   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2194   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
2195                 (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
2196   emit(instr);
2197 }
2198 
drotrv(Register rd,Register rt,Register rs)2199 void Assembler::drotrv(Register rd, Register rt, Register rs) {
2200   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
2201   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
2202       | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
2203   emit(instr);
2204 }
2205 
2206 
dsra(Register rd,Register rt,uint16_t sa)2207 void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
2208   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
2209 }
2210 
2211 
dsrav(Register rd,Register rt,Register rs)2212 void Assembler::dsrav(Register rd, Register rt, Register rs) {
2213   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
2214 }
2215 
2216 
dsll32(Register rd,Register rt,uint16_t sa)2217 void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
2218   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
2219 }
2220 
2221 
dsrl32(Register rd,Register rt,uint16_t sa)2222 void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
2223   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
2224 }
2225 
2226 
dsra32(Register rd,Register rt,uint16_t sa)2227 void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
2228   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
2229 }
2230 
2231 
lsa(Register rd,Register rt,Register rs,uint8_t sa)2232 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
2233   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2234   DCHECK_LE(sa, 3);
2235   DCHECK_EQ(kArchVariant, kMips64r6);
2236   Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2237                 rd.code() << kRdShift | sa << kSaShift | LSA;
2238   emit(instr);
2239 }
2240 
2241 
dlsa(Register rd,Register rt,Register rs,uint8_t sa)2242 void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
2243   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2244   DCHECK_LE(sa, 3);
2245   DCHECK_EQ(kArchVariant, kMips64r6);
2246   Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2247                 rd.code() << kRdShift | sa << kSaShift | DLSA;
2248   emit(instr);
2249 }
2250 
2251 
2252 // ------------Memory-instructions-------------
2253 
AdjustBaseAndOffset(MemOperand & src,OffsetAccessType access_type,int second_access_add_to_offset)2254 void Assembler::AdjustBaseAndOffset(MemOperand& src,
2255                                     OffsetAccessType access_type,
2256                                     int second_access_add_to_offset) {
2257   // This method is used to adjust the base register and offset pair
2258   // for a load/store when the offset doesn't fit into int16_t.
2259   // It is assumed that 'base + offset' is sufficiently aligned for memory
2260   // operands that are machine word in size or smaller. For doubleword-sized
2261   // operands it's assumed that 'base' is a multiple of 8, while 'offset'
2262   // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
2263   // and spilled variables on the stack accessed relative to the stack
2264   // pointer register).
2265   // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
2266 
2267   bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
2268   bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
2269   DCHECK_LE(second_access_add_to_offset, 7);  // Must be <= 7.
2270 
2271   // is_int16 must be passed a signed value, hence the static cast below.
2272   if (is_int16(src.offset()) &&
2273       (!two_accesses || is_int16(static_cast<int32_t>(
2274                             src.offset() + second_access_add_to_offset)))) {
2275     // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
2276     // value) fits into int16_t.
2277     return;
2278   }
2279 
2280   DCHECK(src.rm() !=
2281          at);  // Must not overwrite the register 'base' while loading 'offset'.
2282 
2283 #ifdef DEBUG
2284   // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
2285   uint32_t misalignment = src.offset() & (kDoubleSize - 1);
2286 #endif
2287 
2288   // Do not load the whole 32-bit 'offset' if it can be represented as
2289   // a sum of two 16-bit signed offsets. This can save an instruction or two.
2290   // To simplify matters, only do this for a symmetric range of offsets from
2291   // about -64KB to about +64KB, allowing further addition of 4 when accessing
2292   // 64-bit variables with two 32-bit accesses.
2293   constexpr int32_t kMinOffsetForSimpleAdjustment =
2294       0x7FF8;  // Max int16_t that's a multiple of 8.
2295   constexpr int32_t kMaxOffsetForSimpleAdjustment =
2296       2 * kMinOffsetForSimpleAdjustment;
2297 
2298   UseScratchRegisterScope temps(this);
2299   Register scratch = temps.Acquire();
2300   if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
2301     daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
2302     src.offset_ -= kMinOffsetForSimpleAdjustment;
2303   } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
2304              src.offset() < 0) {
2305     daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
2306     src.offset_ += kMinOffsetForSimpleAdjustment;
2307   } else if (kArchVariant == kMips64r6) {
2308     // On r6 take advantage of the daui instruction, e.g.:
2309     //    daui   at, base, offset_high
2310     //   [dahi   at, 1]                       // When `offset` is close to +2GB.
2311     //    lw     reg_lo, offset_low(at)
2312     //   [lw     reg_hi, (offset_low+4)(at)]  // If misaligned 64-bit load.
2313     // or when offset_low+4 overflows int16_t:
2314     //    daui   at, base, offset_high
2315     //    daddiu at, at, 8
2316     //    lw     reg_lo, (offset_low-8)(at)
2317     //    lw     reg_hi, (offset_low-4)(at)
2318     int16_t offset_low = static_cast<uint16_t>(src.offset());
2319     int32_t offset_low32 = offset_low;
2320     int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
2321     bool increment_hi16 = offset_low < 0;
2322     bool overflow_hi16 = false;
2323 
2324     if (increment_hi16) {
2325       offset_high++;
2326       overflow_hi16 = (offset_high == -32768);
2327     }
2328     daui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
2329 
2330     if (overflow_hi16) {
2331       dahi(scratch, 1);
2332     }
2333 
2334     if (two_accesses && !is_int16(static_cast<int32_t>(
2335                             offset_low32 + second_access_add_to_offset))) {
2336       // Avoid overflow in the 16-bit offset of the load/store instruction when
2337       // adding 4.
2338       daddiu(scratch, scratch, kDoubleSize);
2339       offset_low32 -= kDoubleSize;
2340     }
2341 
2342     src.offset_ = offset_low32;
2343   } else {
2344     // Do not load the whole 32-bit 'offset' if it can be represented as
2345     // a sum of three 16-bit signed offsets. This can save an instruction.
2346     // To simplify matters, only do this for a symmetric range of offsets from
2347     // about -96KB to about +96KB, allowing further addition of 4 when accessing
2348     // 64-bit variables with two 32-bit accesses.
2349     constexpr int32_t kMinOffsetForMediumAdjustment =
2350         2 * kMinOffsetForSimpleAdjustment;
2351     constexpr int32_t kMaxOffsetForMediumAdjustment =
2352         3 * kMinOffsetForSimpleAdjustment;
2353     if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
2354       daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
2355       daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
2356       src.offset_ -= kMinOffsetForMediumAdjustment;
2357     } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
2358                src.offset() < 0) {
2359       daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
2360       daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
2361       src.offset_ += kMinOffsetForMediumAdjustment;
2362     } else {
2363       // Now that all shorter options have been exhausted, load the full 32-bit
2364       // offset.
2365       int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
2366       lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
2367       ori(scratch, scratch, loaded_offset & kImm16Mask);  // Load 32-bit offset.
2368       daddu(scratch, scratch, src.rm());
2369       src.offset_ -= loaded_offset;
2370     }
2371   }
2372   src.rm_ = scratch;
2373 
2374   DCHECK(is_int16(src.offset()));
2375   if (two_accesses) {
2376     DCHECK(is_int16(
2377         static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
2378   }
2379   DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
2380 }
2381 
lb(Register rd,const MemOperand & rs)2382 void Assembler::lb(Register rd, const MemOperand& rs) {
2383   GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
2384 }
2385 
2386 
lbu(Register rd,const MemOperand & rs)2387 void Assembler::lbu(Register rd, const MemOperand& rs) {
2388   GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
2389 }
2390 
2391 
lh(Register rd,const MemOperand & rs)2392 void Assembler::lh(Register rd, const MemOperand& rs) {
2393   GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
2394 }
2395 
2396 
lhu(Register rd,const MemOperand & rs)2397 void Assembler::lhu(Register rd, const MemOperand& rs) {
2398   GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
2399 }
2400 
2401 
lw(Register rd,const MemOperand & rs)2402 void Assembler::lw(Register rd, const MemOperand& rs) {
2403   GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
2404 }
2405 
2406 
lwu(Register rd,const MemOperand & rs)2407 void Assembler::lwu(Register rd, const MemOperand& rs) {
2408   GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
2409 }
2410 
2411 
lwl(Register rd,const MemOperand & rs)2412 void Assembler::lwl(Register rd, const MemOperand& rs) {
2413   DCHECK(is_int16(rs.offset_));
2414   DCHECK_EQ(kArchVariant, kMips64r2);
2415   GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
2416 }
2417 
2418 
lwr(Register rd,const MemOperand & rs)2419 void Assembler::lwr(Register rd, const MemOperand& rs) {
2420   DCHECK(is_int16(rs.offset_));
2421   DCHECK_EQ(kArchVariant, kMips64r2);
2422   GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
2423 }
2424 
2425 
// Emit SB: store byte.
void Assembler::sb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}


// Emit SH: store halfword.
void Assembler::sh(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
}


// Emit SW: store word.
void Assembler::sw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}
2439 
2440 
// Emit SWL: store word left (unaligned-access helper). r2-only; removed in r6.
void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


// Emit SWR: store word right (unaligned-access helper). r2-only.
void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
2453 
// Emit LL: load linked word. The r6 encoding lives under SPECIAL3 with a
// 9-bit signed offset; the r2 encoding uses the classic 16-bit offset form.
void Assembler::ll(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(is_int16(rs.offset_));
    GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
  }
}

// Emit LLD: load linked doubleword; same variant split as ll().
void Assembler::lld(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    DCHECK(is_int9(rs.offset_));
    GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LLD_R6);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(is_int16(rs.offset_));
    GenInstrImmediate(LLD, rs.rm(), rd, rs.offset_);
  }
}
2475 
sc(Register rd,const MemOperand & rs)2476 void Assembler::sc(Register rd, const MemOperand& rs) {
2477   if (kArchVariant == kMips64r6) {
2478     DCHECK(is_int9(rs.offset_));
2479     GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
2480   } else {
2481     DCHECK_EQ(kArchVariant, kMips64r2);
2482     GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
2483   }
2484 }
2485 
scd(Register rd,const MemOperand & rs)2486 void Assembler::scd(Register rd, const MemOperand& rs) {
2487   if (kArchVariant == kMips64r6) {
2488     DCHECK(is_int9(rs.offset_));
2489     GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SCD_R6);
2490   } else {
2491     DCHECK_EQ(kArchVariant, kMips64r2);
2492     GenInstrImmediate(SCD, rs.rm(), rd, rs.offset_);
2493   }
2494 }
2495 
// Emit LUI: load the 16-bit immediate into the upper halfword of rd.
void Assembler::lui(Register rd, int32_t j) {
  // Accept either an unsigned or a sign-extended 16-bit value.
  DCHECK(is_uint16(j) || is_int16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


// Emit AUI: add upper immediate.
void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


// Emit DAUI: doubleword add upper immediate. rs must not be zero_reg (that
// encoding is reserved).
void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(rs != zero_reg);
  GenInstrImmediate(DAUI, rs, rt, j);
}


// Emit DAHI: doubleword add immediate to bits 47..32 (REGIMM encoding).
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}


// Emit DATI: doubleword add immediate to bits 63..48 (REGIMM encoding).
void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
2527 
2528 
// Emit LDL: load doubleword left (unaligned-access helper). r2-only.
void Assembler::ldl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}


// Emit LDR: load doubleword right. r2-only.
void Assembler::ldr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}


// Emit SDL: store doubleword left. r2-only.
void Assembler::sdl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}


// Emit SDR: store doubleword right. r2-only.
void Assembler::sdr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
2555 
2556 
// Emit LD: load doubleword.
void Assembler::ld(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
}


// Emit SD: store doubleword.
void Assembler::sd(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
}
2565 
2566 
2567 // ---------PC-Relative instructions-----------
2568 
// Emit ADDIUPC (r6): rs = PC + (imm19 << 2). The 19-bit immediate is packed
// together with the ADDIUPC sub-opcode into the 21-bit PCREL field.
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// Emit LWPC (r6): PC-relative load word with a 19-bit scaled offset.
void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// Emit LWUPC (r6): PC-relative load word unsigned.
void Assembler::lwupc(Register rs, int32_t offset19) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// Emit LDPC (r6): PC-relative load doubleword; only an 18-bit offset field.
void Assembler::ldpc(Register rs, int32_t offset18) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid() && is_int18(offset18));
  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// Emit AUIPC (r6): rs = PC + (imm16 << 16).
void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// Emit ALUIPC (r6): like auipc but the low 16 result bits are cleared.
void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2615 
2616 
2617 // -------------Misc-instructions--------------
2618 
2619 // Break / Trap instructions.
// Emit BREAK with a 20-bit code placed in the instruction's code field.
// When break_as_stop is set, the code must lie in the stop-code range so the
// simulator can treat the break as a stop().
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK_EQ(code & ~0xFFFFF, 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
2634 
2635 
// Emit a stop: a break instruction the simulator interprets as a named stop.
// On real MIPS hardware a fixed break code is emitted instead, and both
// 'msg' and 'code' are unused there.
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK_GT(code, kMaxWatchpointCode);
  DCHECK_LE(code, kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  break_(code, true);
#endif
}
2645 
2646 
tge(Register rs,Register rt,uint16_t code)2647 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2648   DCHECK(is_uint10(code));
2649   Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2650       | rt.code() << kRtShift | code << 6;
2651   emit(instr);
2652 }
2653 
2654 
tgeu(Register rs,Register rt,uint16_t code)2655 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2656   DCHECK(is_uint10(code));
2657   Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2658       | rt.code() << kRtShift | code << 6;
2659   emit(instr);
2660 }
2661 
2662 
tlt(Register rs,Register rt,uint16_t code)2663 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2664   DCHECK(is_uint10(code));
2665   Instr instr =
2666       SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2667   emit(instr);
2668 }
2669 
2670 
tltu(Register rs,Register rt,uint16_t code)2671 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2672   DCHECK(is_uint10(code));
2673   Instr instr =
2674       SPECIAL | TLTU | rs.code() << kRsShift
2675       | rt.code() << kRtShift | code << 6;
2676   emit(instr);
2677 }
2678 
2679 
teq(Register rs,Register rt,uint16_t code)2680 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2681   DCHECK(is_uint10(code));
2682   Instr instr =
2683       SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2684   emit(instr);
2685 }
2686 
2687 
tne(Register rs,Register rt,uint16_t code)2688 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2689   DCHECK(is_uint10(code));
2690   Instr instr =
2691       SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2692   emit(instr);
2693 }
2694 
// Emit SYNC (memory barrier) with the default stype of 0.
void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}
2699 
2700 // Move from HI/LO register.
2701 
// Emit MFHI: move the HI special register into rd.
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


// Emit MFLO: move the LO special register into rd.
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
2710 
2711 
2712 // Set on less than instructions.
// Emit SLT: rd = (rs < rt), signed compare.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


// Emit SLTU: rd = (rs < rt), unsigned compare.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// Emit SLTI: rt = (rs < immediate), signed compare.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


// Emit SLTIU: rt = (rs < immediate), unsigned compare.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
2731 
2732 
2733 // Conditional move.
// Emit MOVZ: rd = rs if rt == 0.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


// Emit MOVN: rd = rs if rt != 0.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


// Emit MOVT: rd = rs if FP condition code cc is true. The cc number and the
// true/false bit are packed into the rt field of the MOVCI encoding.
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Emit MOVF: rd = rs if FP condition code cc is false.
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2754 
2755 
// Single-precision wrapper for min() below.
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}


// Double-precision wrapper for min().
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}


// Single-precision wrapper for max().
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}


// Double-precision wrapper for max().
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}


// Single-precision wrapper for mina() (minimum by absolute value).
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}


// Double-precision wrapper for mina().
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}


// Single-precision wrapper for maxa() (maximum by absolute value).
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}


// Double-precision wrapper for maxa().
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
2794 
2795 
// Emit the r6 MAX.fmt instruction; fmt selects single (S) or double (D).
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}


// Emit the r6 MIN.fmt instruction; fmt selects single (S) or double (D).
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
2810 
2811 
2812 // GPR.
// Emit the r6 GPR SELEQZ: rd = rs if rt == 0, else 0.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}


// GPR.
// Emit the r6 GPR SELNEZ: rd = rs if rt != 0, else 0.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
2824 
2825 
2826 // Bit twiddling.
// Emit CLZ: count leading zeros of rs into rd. Pre-r6 and r6 use different
// encodings.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}


// Emit DCLZ: count leading zeros of a doubleword; same variant split as clz.
void Assembler::dclz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // dclz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
  }
}
2845 
2846 
// Emit INS: insert 'size' bits of rs into rt starting at bit 'pos'.
// The msb (pos + size - 1) and lsb (pos) go into the rd and sa fields.
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


// Emit DINS: doubleword insert; valid when msb and lsb both fit in 5 bits.
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // dins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}

// Emit DINSM: doubleword insert with msb in [32, 63]; the field encodes
// msb - 32.
void Assembler::dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // dinsm instr has 'rt' field as dest, and two uint5: msbminus32, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos, DINSM);
}

// Emit DINSU: doubleword insert with both msb and lsb in [32, 63]; both
// fields are encoded minus 32.
void Assembler::dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // dinsu instr has 'rt' field as dest, and two uint5: msbminus32, lsbminus32.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos - 32, DINSU);
}

// Emit EXT: extract 'size' bits of rs starting at bit 'pos' into rt.
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // ext instr has 'rt' field as dest, and two uint5: msbd, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


// Emit DEXT: doubleword extract; msbd and lsb both fit in 5 bits.
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dext.
  // dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}

// Emit DEXTM: doubleword extract with size in [33, 64]; encodes size - 33.
void Assembler::dextm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextm.
  // dextm instr has 'rt' field as dest, and two uint5: msbdminus32, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}

// Emit DEXTU: doubleword extract with pos in [32, 63]; encodes pos - 32.
void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextu.
  // dextu instr has 'rt' field as dest, and two uint5: msbd, lsbminus32.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
2904 
2905 
// Emit the r6 BITSWAP: reverse the bits within each byte of rt into rd.
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}


// Emit the r6 DBITSWAP: doubleword variant of bitswap.
void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}
2916 
2917 
// Emit PREF: prefetch the cache line at rs with the given 5-bit hint.
// NOTE(review): the check uses is_uint16 on the offset even though the other
// memory ops here treat the offset field as signed 16-bit — confirm whether
// negative prefetch offsets should be accepted.
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}
2924 
2925 
// Emit the r6 ALIGN: concatenate rt:rs and extract a word shifted by bp
// bytes. The ALIGN sub-code and bp are packed into the sa field.
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}


// Emit the r6 DALIGN: doubleword variant of align.
void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (DALIGN << kBp3Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}
2940 
// Emit WSBH: word swap bytes within halfwords.
void Assembler::wsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
}

// Emit DSBH: doubleword swap bytes within halfwords.
void Assembler::dsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
}

// Emit DSHD: doubleword swap halfwords within doublewords.
void Assembler::dshd(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
}

// Emit SEH: sign-extend halfword.
void Assembler::seh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
}

// Emit SEB: sign-extend byte.
void Assembler::seb(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
}
2965 
2966 // --------Coprocessor-instructions----------------
2967 
2968 // Load, store, move.
// Emit LWC1: load single-precision FP value from memory into fd.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


// Emit LDC1: load double-precision FP value from memory into fd.
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}

// Emit SWC1: store single-precision FP value from fs to memory.
void Assembler::swc1(FPURegister fs, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
}

// Emit SDC1: store double-precision FP value from fs to memory.
void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}
2985 
2986 
// Emit MTC1: move word from GPR rt to FPR fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// Emit MTHC1: move word from GPR rt to the high half of FPR fs.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


// Emit DMTC1: move doubleword from GPR rt to FPR fs.
void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}


// Emit MFC1: move word from FPR fs to GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// Emit MFHC1: move word from the high half of FPR fs to GPR rt.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


// Emit DMFC1: move doubleword from FPR fs to GPR rt.
void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}


// Emit CTC1: move GPR rt to FPU control register fs.
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


// Emit CFC1: move FPU control register fs to GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
3025 
3026 
// Emit the r6 SEL.fmt: select fs or ft into fd based on fd's low bit.
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}


// Single-precision wrapper for sel().
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}


// Double-precision wrapper for sel().
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}
3044 
3045 
3046 // FPR.
// Emit the FPR SELEQZ.fmt: fd = fs if ft's low bit is zero, else zero.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}


// Double-precision wrapper for the FPR seleqz().
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}


// Single-precision wrapper for the FPR seleqz().
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}


// Double-precision wrapper for the FPR selnez().
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}


// Single-precision wrapper for the FPR selnez().
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
3072 
3073 
// Emit MOVZ.S (r2-only): fd = fs if GPR rt == 0.
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}


// Emit MOVZ.D (r2-only): fd = fs if GPR rt == 0.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}


// Emit MOVT.S (r2-only): fd = fs if FP condition code cc is true. The cc
// number and true/false bit are packed into the ft field.
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// Emit MOVT.D (r2-only): fd = fs if FP condition code cc is true.
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// Emit MOVF.S (r2-only): fd = fs if FP condition code cc is false.
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// Emit MOVF.D (r2-only): fd = fs if FP condition code cc is false.
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// Emit MOVN.S (r2-only): fd = fs if GPR rt != 0.
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}


// Emit MOVN.D (r2-only): fd = fs if GPR rt != 0.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK_EQ(kArchVariant, kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
3124 
3125 
3126 // FPR.
// Emit the r6 FPR SELNEZ.fmt: fd = fs if ft's low bit is non-zero, else zero.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
3133 
3134 
3135 // Arithmetic.
3136 
// Emit ADD.S: fd = fs + ft, single precision.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}


// Emit ADD.D: fd = fs + ft, double precision.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


// Emit SUB.S: fd = fs - ft, single precision.
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}


// Emit SUB.D: fd = fs - ft, double precision.
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


// Emit MUL.S: fd = fs * ft, single precision.
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}


// Emit MUL.D: fd = fs * ft, double precision.
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
3165 
// Deliberately disabled: see the comment inside.
void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // On Loongson 3A (MIPS64R2), MADD.S instruction is actually fused MADD.S and
  // this causes failure in some of the tests. Since this optimization is rarely
  // used, and not used at all on MIPS64R6, this instruction is removed.
  UNREACHABLE();
}

// Deliberately disabled: see the comment inside.
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  // On Loongson 3A (MIPS64R2), MADD.D instruction is actually fused MADD.D and
  // this causes failure in some of the tests. Since this optimization is rarely
  // used, and not used at all on MIPS64R6, this instruction is removed.
  UNREACHABLE();
}

// Deliberately disabled, for the same reason as madd_s.
void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // See explanation for instruction madd_s.
  UNREACHABLE();
}

// Deliberately disabled, for the same reason as madd_d.
void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  // See explanation for instruction madd_d.
  UNREACHABLE();
}
3193 
// Emit the r6 MADDF.S fused multiply-add: fd = fd + fs * ft.
void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
}

// Emit the r6 MADDF.D fused multiply-add, double precision.
void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
}

// Emit the r6 MSUBF.S fused multiply-subtract: fd = fd - fs * ft.
void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
}

// Emit the r6 MSUBF.D fused multiply-subtract, double precision.
void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
}
3213 
// Emit DIV.S: fd = fs / ft, single precision.
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}


// Emit DIV.D: fd = fs / ft, double precision.
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
3222 
3223 
// fd = |fs|, single precision (funct constant shared with the D variant,
// see note at div_s).
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}


// fd = |fs|, double precision.
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


// Register move, double precision: fd = fs.
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


// Register move, single precision: fd = fs.
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
3242 
3243 
// fd = -fs, single precision (funct constant shared with the D variant).
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}


// fd = -fs, double precision.
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


// fd = sqrt(fs), single precision.
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}


// fd = sqrt(fs), double precision.
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// fd = 1 / sqrt(fs), single precision (approximate reciprocal square root).
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}


// fd = 1 / sqrt(fs), double precision.
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}


// fd = 1 / fs, double precision (approximate reciprocal).
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}


// fd = 1 / fs, single precision.
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
3282 
3283 
// Conversions.
// Convert single to 32-bit word, using the current FCSR rounding mode.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


// Convert double to 32-bit word, using the current FCSR rounding mode.
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


// Convert single to 32-bit word, rounding toward zero.
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


// Convert double to 32-bit word, rounding toward zero.
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


// Convert single to 32-bit word, rounding to nearest.
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


// Convert double to 32-bit word, rounding to nearest.
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


// Convert single to 32-bit word, rounding toward -infinity.
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


// Convert double to 32-bit word, rounding toward -infinity.
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


// Convert single to 32-bit word, rounding toward +infinity.
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


// Convert double to 32-bit word, rounding toward +infinity.
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


// Round to integral value in FP format, single precision (r6 RINT).
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }


// Round to integral value in FP format, double precision (r6 RINT).
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }


// Shared encoder for rint_s/rint_d. MIPS64r6 only.
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
3345 
3346 
// Convert single to 64-bit long, current rounding mode. The 64-bit (L-format)
// conversions below are only available on r2/r6 variants, hence the DCHECKs.
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


// Convert double to 64-bit long, current rounding mode.
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


// Convert single to 64-bit long, rounding toward zero.
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


// Convert double to 64-bit long, rounding toward zero.
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


// Convert single to 64-bit long, rounding to nearest.
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


// Convert double to 64-bit long, rounding to nearest.
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


// Convert single to 64-bit long, rounding toward -infinity.
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


// Convert double to 64-bit long, rounding toward -infinity.
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


// Convert single to 64-bit long, rounding toward +infinity.
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


// Convert double to 64-bit long, rounding toward +infinity.
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
3399 
3400 
// FP classify, single precision (r6 CLASS.S): writes a class bitmask to fd.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}


// FP classify, double precision (r6 CLASS.D).
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}


// fd = min-by-absolute-value(fs, ft). MIPS64r6 only; fmt must be S or D.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


// fd = max-by-absolute-value(fs, ft). MIPS64r6 only; fmt must be S or D.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
3427 
3428 
// Convert 32-bit word to single.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


// Convert 64-bit long to single (r2/r6 only).
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


// Convert double to single.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


// Convert 32-bit word to double.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


// Convert 64-bit long to double (r2/r6 only).
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


// Convert single to double.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
3459 
3460 
// Conditions for >= MIPSr6.
// Emits an r6 CMP.cond.fmt: compares fs with ft and writes the boolean
// result mask into FPU register fd. The fmt value must occupy only the
// rs field bits (checked below).
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


// Single-precision compare.
// NOTE(review): passes W as the format code -- presumably the r6 CMP
// encoding reuses the W/L format values for its S/D compare formats;
// confirm against the MIPS64r6 manual.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

// Double-precision compare (L format code, see note on cmp_s).
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}


// Branch if FPU register ft (a CMP result mask) is zero. r6 only.
// Blocks the trampoline pool for one extra slot to protect the delay slot.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Branch if FPU register ft (a CMP result mask) is non-zero. r6 only.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
3499 
3500 
// Conditions for < MIPSr6.
// Emits a legacy C.cond.fmt compare that sets FP condition code cc (0..7).
// Not available on r6 (replaced by cmp above).
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK_NE(kArchVariant, kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// Legacy single-precision compare setting condition code cc.
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


// Legacy double-precision compare setting condition code cc.
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}


// Compares src1 against 0.0 (the only supported src2 value) using condition
// code 0. Clobbers f14, which is used to materialize the 0.0 constant.
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  DCHECK_EQ(src2, 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


// Branch if FP condition code cc is false (legacy BC1F).
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Branch if FP condition code cc is true (legacy BC1T).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
3551 
// ---------- MSA instructions ------------
// List of MSA branch-on-vector-zero / non-zero instructions, one entry per
// data format (v = whole vector, b/h/w/d = per-element).
#define MSA_BRANCH_LIST(V) \
  V(bz_v, BZ_V)            \
  V(bz_b, BZ_B)            \
  V(bz_h, BZ_H)            \
  V(bz_w, BZ_W)            \
  V(bz_d, BZ_D)            \
  V(bnz_v, BNZ_V)          \
  V(bnz_b, BNZ_B)          \
  V(bnz_h, BNZ_H)          \
  V(bnz_w, BNZ_W)          \
  V(bnz_d, BNZ_D)

// Emits one MSA branch per list entry; the branch tests vector register wt.
#define MSA_BRANCH(name, opcode)                         \
  void Assembler::name(MSARegister wt, int16_t offset) { \
    GenInstrMsaBranch(opcode, wt, offset);               \
  }

MSA_BRANCH_LIST(MSA_BRANCH)
#undef MSA_BRANCH
#undef MSA_BRANCH_LIST
3573 
3574 #define MSA_LD_ST_LIST(V) \
3575   V(ld_b, LD_B)           \
3576   V(ld_h, LD_H)           \
3577   V(ld_w, LD_W)           \
3578   V(ld_d, LD_D)           \
3579   V(st_b, ST_B)           \
3580   V(st_h, ST_H)           \
3581   V(st_w, ST_W)           \
3582   V(st_d, ST_D)
3583 
3584 #define MSA_LD_ST(name, opcode)                                  \
3585   void Assembler::name(MSARegister wd, const MemOperand& rs) {   \
3586     MemOperand source = rs;                                      \
3587     AdjustBaseAndOffset(source);                                 \
3588     if (is_int10(source.offset())) {                             \
3589       GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
3590     } else {                                                     \
3591       UseScratchRegisterScope temps(this);                       \
3592       Register scratch = temps.Acquire();                        \
3593       DCHECK(rs.rm() != scratch);                                \
3594       daddiu(scratch, source.rm(), source.offset());             \
3595       GenInstrMsaMI10(opcode, 0, scratch, wd);                   \
3596     }                                                            \
3597   }
3598 
MSA_LD_ST_LIST(MSA_LD_ST)3599 MSA_LD_ST_LIST(MSA_LD_ST)
3600 #undef MSA_LD_ST
3601 #undef MSA_BRANCH_LIST
3602 
// LDI (load immediate) per element format. Despite the I5_DF_* names, these
// use the I10 instruction format with a signed 10-bit immediate replicated
// into every element of wd.
#define MSA_I10_LIST(V) \
  V(ldi_b, I5_DF_b)     \
  V(ldi_h, I5_DF_h)     \
  V(ldi_w, I5_DF_w)     \
  V(ldi_d, I5_DF_d)

// Emits one ldi_* per list entry.
#define MSA_I10(name, format)                           \
  void Assembler::name(MSARegister wd, int32_t imm10) { \
    GenInstrMsaI10(LDI, format, imm10, wd);             \
  }
MSA_I10_LIST(MSA_I10)
#undef MSA_I10
#undef MSA_I10_LIST
3616 
// MSA I5-format instructions: vector op with a 5-bit immediate operand
// (add/sub/min/max/compare against immediate).
#define MSA_I5_LIST(V) \
  V(addvi, ADDVI)      \
  V(subvi, SUBVI)      \
  V(maxi_s, MAXI_S)    \
  V(maxi_u, MAXI_U)    \
  V(mini_s, MINI_S)    \
  V(mini_u, MINI_U)    \
  V(ceqi, CEQI)        \
  V(clti_s, CLTI_S)    \
  V(clti_u, CLTI_U)    \
  V(clei_s, CLEI_S)    \
  V(clei_u, CLEI_U)

// Generates one method per (instruction, element format) pair, e.g.
// addvi_b(wd, ws, imm5).
#define MSA_I5_FORMAT(name, opcode, format)                       \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t imm5) {                \
    GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd);          \
  }

// Instantiates all four element formats (byte/halfword/word/doubleword).
#define MSA_I5(name, opcode)     \
  MSA_I5_FORMAT(name, opcode, b) \
  MSA_I5_FORMAT(name, opcode, h) \
  MSA_I5_FORMAT(name, opcode, w) \
  MSA_I5_FORMAT(name, opcode, d)

MSA_I5_LIST(MSA_I5)
#undef MSA_I5
#undef MSA_I5_FORMAT
#undef MSA_I5_LIST
3646 
// MSA I8-format instructions: bitwise/select/shuffle ops taking an 8-bit
// immediate. The element format is fixed per mnemonic (suffix in the name).
#define MSA_I8_LIST(V) \
  V(andi_b, ANDI_B)    \
  V(ori_b, ORI_B)      \
  V(nori_b, NORI_B)    \
  V(xori_b, XORI_B)    \
  V(bmnzi_b, BMNZI_B)  \
  V(bmzi_b, BMZI_B)    \
  V(bseli_b, BSELI_B)  \
  V(shf_b, SHF_B)      \
  V(shf_h, SHF_H)      \
  V(shf_w, SHF_W)

// Emits one method per list entry.
#define MSA_I8(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
    GenInstrMsaI8(opcode, imm8, ws, wd);                                \
  }

MSA_I8_LIST(MSA_I8)
#undef MSA_I8
#undef MSA_I8_LIST
3667 
// MSA VEC-format instructions: whole-register (format-less) three-operand
// bitwise logic and select.
#define MSA_VEC_LIST(V) \
  V(and_v, AND_V)       \
  V(or_v, OR_V)         \
  V(nor_v, NOR_V)       \
  V(xor_v, XOR_V)       \
  V(bmnz_v, BMNZ_V)     \
  V(bmz_v, BMZ_V)       \
  V(bsel_v, BSEL_V)

// Emits one method per list entry: wd = op(ws, wt).
#define MSA_VEC(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
    GenInstrMsaVec(opcode, wt, ws, wd);                                  \
  }

MSA_VEC_LIST(MSA_VEC)
#undef MSA_VEC
#undef MSA_VEC_LIST
3685 
// MSA 2R-format instructions: per-element unary ops (population count,
// count leading ones, count leading zeros).
#define MSA_2R_LIST(V) \
  V(pcnt, PCNT)        \
  V(nloc, NLOC)        \
  V(nlzc, NLZC)

// Generates one method per (instruction, element format) pair.
#define MSA_2R_FORMAT(name, opcode, format)                         \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd);              \
  }

// Instantiates all four element formats.
#define MSA_2R(name, opcode)     \
  MSA_2R_FORMAT(name, opcode, b) \
  MSA_2R_FORMAT(name, opcode, h) \
  MSA_2R_FORMAT(name, opcode, w) \
  MSA_2R_FORMAT(name, opcode, d)

MSA_2R_LIST(MSA_2R)
#undef MSA_2R
#undef MSA_2R_FORMAT
#undef MSA_2R_LIST
3706 
// FILL: replicates GPR rs into every element of vector wd. Encoded as a
// 2R-format instruction with the GPR placed in the ws field. Requires r6
// with MSA enabled.
#define MSA_FILL(format)                                              \
  void Assembler::fill_##format(MSARegister wd, Register rs) {        \
    DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));      \
    DCHECK(rs.is_valid() && wd.is_valid());                           \
    Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format |   \
                  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
                  MSA_VEC_2R_2RF_MINOR;                               \
    emit(instr);                                                      \
  }

MSA_FILL(b)
MSA_FILL(h)
MSA_FILL(w)
MSA_FILL(d)
#undef MSA_FILL
3722 
// MSA 2RF-format instructions: per-element unary floating-point ops
// (classify, truncate/convert, sqrt, reciprocal, rounding, log2, expand,
// fixed-point conversions).
#define MSA_2RF_LIST(V) \
  V(fclass, FCLASS)     \
  V(ftrunc_s, FTRUNC_S) \
  V(ftrunc_u, FTRUNC_U) \
  V(fsqrt, FSQRT)       \
  V(frsqrt, FRSQRT)     \
  V(frcp, FRCP)         \
  V(frint, FRINT)       \
  V(flog2, FLOG2)       \
  V(fexupl, FEXUPL)     \
  V(fexupr, FEXUPR)     \
  V(ffql, FFQL)         \
  V(ffqr, FFQR)         \
  V(ftint_s, FTINT_S)   \
  V(ftint_u, FTINT_U)   \
  V(ffint_s, FFINT_S)   \
  V(ffint_u, FFINT_U)

// Generates one method per (instruction, element format) pair.
#define MSA_2RF_FORMAT(name, opcode, format)                        \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd);            \
  }

// Floating-point formats only exist for word (f32) and doubleword (f64).
#define MSA_2RF(name, opcode)     \
  MSA_2RF_FORMAT(name, opcode, w) \
  MSA_2RF_FORMAT(name, opcode, d)

MSA_2RF_LIST(MSA_2RF)
#undef MSA_2RF
#undef MSA_2RF_FORMAT
#undef MSA_2RF_LIST
3754 
// MSA 3R-format instructions: per-element three-register integer ops.
// Some opcode names carry a _MSA suffix to avoid clashing with the scalar
// instruction constants of the same name (e.g. SLL vs SLL_MSA).
#define MSA_3R_LIST(V)  \
  V(sll, SLL_MSA)       \
  V(sra, SRA_MSA)       \
  V(srl, SRL_MSA)       \
  V(bclr, BCLR)         \
  V(bset, BSET)         \
  V(bneg, BNEG)         \
  V(binsl, BINSL)       \
  V(binsr, BINSR)       \
  V(addv, ADDV)         \
  V(subv, SUBV)         \
  V(max_s, MAX_S)       \
  V(max_u, MAX_U)       \
  V(min_s, MIN_S)       \
  V(min_u, MIN_U)       \
  V(max_a, MAX_A)       \
  V(min_a, MIN_A)       \
  V(ceq, CEQ)           \
  V(clt_s, CLT_S)       \
  V(clt_u, CLT_U)       \
  V(cle_s, CLE_S)       \
  V(cle_u, CLE_U)       \
  V(add_a, ADD_A)       \
  V(adds_a, ADDS_A)     \
  V(adds_s, ADDS_S)     \
  V(adds_u, ADDS_U)     \
  V(ave_s, AVE_S)       \
  V(ave_u, AVE_U)       \
  V(aver_s, AVER_S)     \
  V(aver_u, AVER_U)     \
  V(subs_s, SUBS_S)     \
  V(subs_u, SUBS_U)     \
  V(subsus_u, SUBSUS_U) \
  V(subsuu_s, SUBSUU_S) \
  V(asub_s, ASUB_S)     \
  V(asub_u, ASUB_U)     \
  V(mulv, MULV)         \
  V(maddv, MADDV)       \
  V(msubv, MSUBV)       \
  V(div_s, DIV_S_MSA)   \
  V(div_u, DIV_U)       \
  V(mod_s, MOD_S)       \
  V(mod_u, MOD_U)       \
  V(dotp_s, DOTP_S)     \
  V(dotp_u, DOTP_U)     \
  V(dpadd_s, DPADD_S)   \
  V(dpadd_u, DPADD_U)   \
  V(dpsub_s, DPSUB_S)   \
  V(dpsub_u, DPSUB_U)   \
  V(pckev, PCKEV)       \
  V(pckod, PCKOD)       \
  V(ilvl, ILVL)         \
  V(ilvr, ILVR)         \
  V(ilvev, ILVEV)       \
  V(ilvod, ILVOD)       \
  V(vshf, VSHF)         \
  V(srar, SRAR)         \
  V(srlr, SRLR)         \
  V(hadd_s, HADD_S)     \
  V(hadd_u, HADD_U)     \
  V(hsub_s, HSUB_S)     \
  V(hsub_u, HSUB_U)

// Generates one method per (instruction, element format) pair, taking three
// MSA vector registers: wd = op(ws, wt).
#define MSA_3R_FORMAT(name, opcode, format)                             \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,       \
                                  MSARegister wt) {                     \
    GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
  }

// Variant for SLD/SPLAT, whose third operand is a GPR (element selector).
#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format)                \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,    \
                                  Register rt) {                     \
    GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
  }

// Instantiates all four element formats.
#define MSA_3R(name, opcode)     \
  MSA_3R_FORMAT(name, opcode, b) \
  MSA_3R_FORMAT(name, opcode, h) \
  MSA_3R_FORMAT(name, opcode, w) \
  MSA_3R_FORMAT(name, opcode, d)

#define MSA_3R_SLD_SPLAT(name, opcode)     \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)

MSA_3R_LIST(MSA_3R)
MSA_3R_SLD_SPLAT(sld, SLD)
MSA_3R_SLD_SPLAT(splat, SPLAT)

#undef MSA_3R
#undef MSA_3R_FORMAT
#undef MSA_3R_FORMAT_SLD_SPLAT
#undef MSA_3R_SLD_SPLAT
#undef MSA_3R_LIST
3851 
// MSA 3RF-format instructions, list 1: three-register floating-point ops
// whose formats are word (f32) / doubleword (f64).
#define MSA_3RF_LIST1(V) \
  V(fcaf, FCAF)          \
  V(fcun, FCUN)          \
  V(fceq, FCEQ)          \
  V(fcueq, FCUEQ)        \
  V(fclt, FCLT)          \
  V(fcult, FCULT)        \
  V(fcle, FCLE)          \
  V(fcule, FCULE)        \
  V(fsaf, FSAF)          \
  V(fsun, FSUN)          \
  V(fseq, FSEQ)          \
  V(fsueq, FSUEQ)        \
  V(fslt, FSLT)          \
  V(fsult, FSULT)        \
  V(fsle, FSLE)          \
  V(fsule, FSULE)        \
  V(fadd, FADD)          \
  V(fsub, FSUB)          \
  V(fmul, FMUL)          \
  V(fdiv, FDIV)          \
  V(fmadd, FMADD)        \
  V(fmsub, FMSUB)        \
  V(fexp2, FEXP2)        \
  V(fmin, FMIN)          \
  V(fmin_a, FMIN_A)      \
  V(fmax, FMAX)          \
  V(fmax_a, FMAX_A)      \
  V(fcor, FCOR)          \
  V(fcune, FCUNE)        \
  V(fcne, FCNE)          \
  V(fsor, FSOR)          \
  V(fsune, FSUNE)        \
  V(fsne, FSNE)

// List 2: ops whose formats are halfword/word (fixed-point and
// narrowing/conversion ops), hence the different format instantiation.
#define MSA_3RF_LIST2(V) \
  V(fexdo, FEXDO)        \
  V(ftq, FTQ)            \
  V(mul_q, MUL_Q)        \
  V(madd_q, MADD_Q)      \
  V(msub_q, MSUB_Q)      \
  V(mulr_q, MULR_Q)      \
  V(maddr_q, MADDR_Q)    \
  V(msubr_q, MSUBR_Q)

// Generates one method per (instruction, format) pair; df_c is the raw
// 1-bit data-format field value for the encoding.
#define MSA_3RF_FORMAT(name, opcode, df, df_c)                \
  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
                              MSARegister wt) {               \
    GenInstrMsa3RF(opcode, df_c, wt, ws, wd);                 \
  }

#define MSA_3RF_1(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, w, 0) \
  MSA_3RF_FORMAT(name, opcode, d, 1)

#define MSA_3RF_2(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, h, 0) \
  MSA_3RF_FORMAT(name, opcode, w, 1)

MSA_3RF_LIST1(MSA_3RF_1)
MSA_3RF_LIST2(MSA_3RF_2)
#undef MSA_3RF_1
#undef MSA_3RF_2
#undef MSA_3RF_FORMAT
#undef MSA_3RF_LIST1
#undef MSA_3RF_LIST2
3918 
// SLDI: element slide with an immediate slide amount n (ELM format).
void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
}

void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
}

void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
}

void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
}

// SPLATI: replicate element n of ws into every element of wd.
void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
}

void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
}

void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
}

void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
}
3950 
// COPY_S: copy element n of vector ws to GPR rd with sign extension.
void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
}

void Assembler::copy_s_d(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_D, n, ws, rd);
}

// COPY_U: copy element n of vector ws to GPR rd with zero extension.
// NOTE(review): no copy_u_d is provided -- presumably a 64-bit element
// needs no extension so copy_s_d suffices; confirm against the MSA spec.
void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
}
3978 
// INSERT: write GPR rs into element n of vector wd (other elements kept).
void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
}

void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
}

void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
}

void Assembler::insert_d(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_D, n, rs, wd);
}

// INSVE: write element 0 of vector ws into element n of vector wd.
void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
}

void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
}

void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
}

void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
}
4010 
// MOVE_V: whole-vector register move wd = ws. Requires r6 with MSA enabled.
void Assembler::move_v(MSARegister wd, MSARegister ws) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

// CTCMSA: copy GPR rs to MSA control register cd. Note the GPR is encoded
// in the ws field and the control register in the wd field.
void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(cd.is_valid() && rs.is_valid());
  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
                (cd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

// CFCMSA: copy MSA control register cs to GPR rd (inverse of ctcmsa).
void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rd.is_valid() && cs.is_valid());
  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
                (rd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}
4034 
// MSA BIT-format instructions: per-element shift/bit ops with an immediate
// bit index or shift amount m.
#define MSA_BIT_LIST(V) \
  V(slli, SLLI)         \
  V(srai, SRAI)         \
  V(srli, SRLI)         \
  V(bclri, BCLRI)       \
  V(bseti, BSETI)       \
  V(bnegi, BNEGI)       \
  V(binsli, BINSLI)     \
  V(binsri, BINSRI)     \
  V(sat_s, SAT_S)       \
  V(sat_u, SAT_U)       \
  V(srari, SRARI)       \
  V(srlri, SRLRI)

// Generates one method per (instruction, element format) pair.
#define MSA_BIT_FORMAT(name, opcode, format)                      \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t m) {                   \
    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);           \
  }

// Instantiates all four element formats.
#define MSA_BIT(name, opcode)     \
  MSA_BIT_FORMAT(name, opcode, b) \
  MSA_BIT_FORMAT(name, opcode, h) \
  MSA_BIT_FORMAT(name, opcode, w) \
  MSA_BIT_FORMAT(name, opcode, d)

MSA_BIT_LIST(MSA_BIT)
#undef MSA_BIT
#undef MSA_BIT_FORMAT
#undef MSA_BIT_LIST
4065 
// Adjusts one internal reference at |pc| by |pc_delta| bytes after the code
// buffer has moved. Handles three encodings: a raw 64-bit data slot, a
// lui/ori/ori address-materialization sequence, and a j/jal (or boxed
// j/jal) instruction. Returns the number of instruction words patched
// (0 when the slot is an end-of-jump-chain sentinel).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    // Raw 64-bit pointer stored as data; it occupies two 32-bit
    // instruction slots.
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    // 48-bit address split across three 16-bit immediates:
    // lui at +0, ori at +1, ori at +3 (the word at +2 is left untouched).
    Instr instr_lui = instr_at(pc + 0 * kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK_EQ(imm & 3, 0);  // Targets must be instruction-aligned.

    // Clear the old immediates, then splice in the relocated address.
    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation: shift the 28-bit region-relative target.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK_EQ(imm28 & 3, 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    // A "boxed" jump: a raw pc-relative offset tagged with kJRawMark or
    // kJalRawMark, to be converted into a real j/jal now that the final
    // target region is known.
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend 28-bit offset to 32-bit.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK_EQ(imm28 & 3, 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check markings whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
4133 
4134 
// Grows the code buffer when it is about to overflow: allocates a larger
// buffer, copies instructions (from the front) and relocation info (from
// the back) across, repositions the internal writers, and re-targets all
// INTERNAL_REFERENCE relocations that point into the old buffer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 1 * MB) {
    // Double small buffers; grow large ones linearly by 1MB.
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (desc.buffer_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  // Relocation info grows downward from the end of the buffer.
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data. pc_delta shifts front-anchored data (instructions),
  // rc_delta shifts end-anchored data (reloc info).
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
    }
  }
  DCHECK(!overflow());
}
4185 
4186 
// Emits one raw byte of data at the current pc (never into a branch
// delay slot).
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
4191 
4192 
// Emits one raw 32-bit word of data at the current pc (never into a
// branch delay slot).
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
4197 
4198 
// Emits one raw 64-bit word of data at the current pc (never into a
// branch delay slot).
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
4203 
4204 
// Emits the 64-bit address of |label| as data, with INTERNAL_REFERENCE
// reloc info so it survives buffer moves. A bound label yields the
// absolute buffer address; an unbound one emits a jump-chain entry and
// records its position for later fix-up.
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    // jump_address links this slot into the label's chain; remember where
    // it lives so RelocateInternalReference can find it.
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
4218 
4219 
RecordRelocInfo(RelocInfo::Mode rmode,intptr_t data)4220 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
4221   // We do not try to reuse pool constants.
4222   RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
4223   if (!RelocInfo::IsNone(rinfo.rmode())) {
4224     if (options().disable_reloc_info_for_patching) return;
4225     // Don't record external references unless the heap will be serialized.
4226     if (RelocInfo::IsOnlyForSerializer(rmode) &&
4227         !options().record_reloc_info_for_serialization && !emit_debug_code()) {
4228       return;
4229     }
4230     DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
4231     reloc_info_writer.Write(&rinfo);
4232   }
4233 }
4234 
4235 
// Blocks trampoline-pool emission for the next |instructions| instructions.
// CheckTrampolinePoolQuick is called first so that a pool which would
// become due inside the blocked region can be handled up front —
// presumably by emitting it now; confirm against its definition.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
4240 
4241 
// Emits the trampoline pool — one long-range jump slot per currently
// unbound label — unless emission is blocked, and schedules the next
// pool check via next_buffer_check_. The pool is emitted at most once
// (trampoline_emitted_).
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Branch over the pool so straight-line execution skips it.
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      // One slot per unbound label. On r6 a compact bc/nop pair suffices;
      // pre-r6 materializes a 48-bit target from the PC (read via nal into
      // ra) plus a lui/ori offset, preserving ra in t8 around the sequence.
      for (int i = 0; i < unbound_labels_count_; i++) {
        {
          if (kArchVariant == kMips64r6) {
            bc(&after_pool);
            nop();
          } else {
            or_(t8, ra, zero_reg);
            nal();       // Read PC into ra register.
            lui(t9, 0);  // Branch delay slot.
            ori(t9, t9, 0);
            daddu(t9, ra, t9);
            or_(ra, t8, zero_reg);
            // Instruction jr will take or_ from the next trampoline.
            // in its branch delay slot. This is the expected behavior
            // in order to decrease size of trampoline pool.
            jr(t9);
          }
        }
      }
      nop();
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
4310 
4311 
target_address_at(Address pc)4312 Address Assembler::target_address_at(Address pc) {
4313   Instr instr0 = instr_at(pc);
4314   Instr instr1 = instr_at(pc + 1 * kInstrSize);
4315   Instr instr3 = instr_at(pc + 3 * kInstrSize);
4316 
4317   // Interpret 4 instructions for address generated by li: See listing in
4318   // Assembler::set_target_address_at() just below.
4319   if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
4320       (GetOpcodeField(instr3) == ORI)) {
4321     // Assemble the 48 bit value.
4322      int64_t addr  = static_cast<int64_t>(
4323           ((uint64_t)(GetImmediate16(instr0)) << 32) |
4324           ((uint64_t)(GetImmediate16(instr1)) << 16) |
4325           ((uint64_t)(GetImmediate16(instr3))));
4326 
4327     // Sign extend to get canonical address.
4328     addr = (addr << 16) >> 16;
4329     return static_cast<Address>(addr);
4330   }
4331   // We should never get here, force a bad address if we do.
4332   UNREACHABLE();
4333 }
4334 
4335 
4336 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
4337 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
4338 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
4339 // OS::nan_value() returns a qNaN.
// Overwrites the HeapNumber's payload with the platform's quiet NaN,
// quieting a NaN imported from a snapshot built with the opposite
// qNaN/sNaN encoding (see comment above).
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
4343 
4344 
4345 // On Mips64, a target address is stored in a 4-instruction sequence:
4346 //    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
4347 //    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
4348 //    2: dsll(rd, rd, 16);
4349 //    3: ori(rd, rd, j.imm32_ & kImm16Mask);
4350 //
4351 // Patching the address must replace all the lui & ori instructions,
4352 // and flush the i-cache.
4353 //
4354 // There is an optimization below, which emits a nop when the address
4355 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
4356 // and possibly removed.
set_target_value_at(Address pc,uint64_t target,ICacheFlushMode icache_flush_mode)4357 void Assembler::set_target_value_at(Address pc, uint64_t target,
4358                                     ICacheFlushMode icache_flush_mode) {
4359   // There is an optimization where only 4 instructions are used to load address
4360   // in code on MIP64 because only 48-bits of address is effectively used.
4361   // It relies on fact the upper [63:48] bits are not used for virtual address
4362   // translation and they have to be set according to value of bit 47 in order
4363   // get canonical address.
4364   Instr instr1 = instr_at(pc + kInstrSize);
4365   uint32_t rt_code = GetRt(instr1);
4366   uint32_t* p = reinterpret_cast<uint32_t*>(pc);
4367 
4368 #ifdef DEBUG
4369   // Check we have the result from a li macro-instruction.
4370   Instr instr0 = instr_at(pc);
4371   Instr instr3 = instr_at(pc + kInstrSize * 3);
4372   DCHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
4373           GetOpcodeField(instr3) == ORI));
4374 #endif
4375 
4376   // Must use 4 instructions to insure patchable code.
4377   // lui rt, upper-16.
4378   // ori rt, rt, lower-16.
4379   // dsll rt, rt, 16.
4380   // ori rt rt, lower-16.
4381   *p = LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
4382   *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
4383              ((target >> 16) & kImm16Mask);
4384   *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
4385              (target & kImm16Mask);
4386 
4387   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
4388     Assembler::FlushICache(pc, 4 * kInstrSize);
4389   }
4390 }
4391 
// Snapshots the assembler's scratch-register list so the destructor can
// restore whatever this scope acquires.
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}
4395 
// Returns all registers acquired within this scope to the assembler's
// scratch-register list.
UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;
}
4399 
Acquire()4400 Register UseScratchRegisterScope::Acquire() {
4401   DCHECK_NOT_NULL(available_);
4402   DCHECK_NE(*available_, 0);
4403   int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
4404   *available_ &= ~(1UL << index);
4405 
4406   return Register::from_code(index);
4407 }
4408 
// True while at least one scratch register remains unacquired.
bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
4410 
4411 }  // namespace internal
4412 }  // namespace v8
4413 
4414 #endif  // V8_TARGET_ARCH_MIPS64
4415