// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/mips/assembler-mips.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/mips/assembler-mips-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#else
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}
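
// Illustrative sketch of how the probed bitmask is consumed; the guard below
// assumes the existing CpuFeatures::IsSupported(CpuFeature) helper, which
// tests supported_ & (1u << feature):
//
//   if (CpuFeatures::IsSupported(FPU)) {
//     // Safe to emit hardware FPU instructions here.
//   }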


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
      0,   // zero_reg
      1,   // at
      2,   // v0
      3,   // v1
      4,   // a0
      5,   // a1
      6,   // a2
      7,   // a3
      8,   // t0
      9,   // t1
      10,  // t2
      11,  // t3
      12,  // t4
      13,  // t5
      14,  // t6
      15,  // t7
      16,  // s0
      17,  // s1
      18,  // s2
      19,  // s3
      20,  // s4
      21,  // s5
      22,  // s6
      23,  // s7
      24,  // t8
      25,  // t9
      26,  // k0
      27,  // k1
      28,  // gp
      29,  // sp
      30,  // fp
      31,  // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
      zero_reg,
      at,
      v0, v1,
      a0, a1, a2, a3,
      t0, t1, t2, t3, t4, t5, t6, t7,
      s0, s1, s2, s3, s4, s5, s6, s7,
      t8, t9,
      k0, k1,
      gp,
      sp,
      fp,
      ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}

int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}

void RelocInfo::set_js_to_wasm_address(Address address,
                                       ICacheFlushMode icache_flush_mode) {
  DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
  Assembler::set_target_address_at(pc_, constant_pool_, address,
                                   icache_flush_mode);
}

Address RelocInfo::js_to_wasm_address() const {
  DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
  return Assembler::target_address_at(pc_, constant_pool_);
}

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedCode(CodeStub* stub) {
  Operand result(0, RelocInfo::CODE_TARGET);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(stub);
  return result;
}

MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        break;
      case HeapObjectRequest::kCodeStub:
        request.code_stub()->set_isolate(isolate);
        object = request.code_stub()->GetCode();
        break;
    }
    Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation, or part of Pop(r) operations,
// as a post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SW | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LW | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
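
// For reference, with sp == r29 and the field layout assumed in this file
// (opcode:31..26, rs:25..21, rt:20..16, imm:15..0), the stack-adjustment
// patterns above assemble to the familiar MIPS32 encodings:
//   kPushInstruction == 0x27BDFFFC  (addiu sp, sp, -4)
//   kPopInstruction  == 0x27BD0004  (addiu sp, sp, 4)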

Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size),
      scratch_register_list_(at.bit()) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // There is no known advantage to aligning branch/call targets to more
  // than a single instruction.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}


Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}


Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by an offset field value of -1, which is an
// otherwise illegal value (a branch to -1 would be an infinite loop). The
// instruction's 16-bit offset field addresses 32-bit words, but in code it
// is converted to an 18-bit value addressing bytes, hence the -4 value below.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
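
// Worked example: a 16-bit offset field of all ones reads back as -1 words;
// AddBranchOffset() below scales word offsets to byte offsets, so the chain
// terminator decodes to -4 bytes, matching kEndOfChain.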

bool Assembler::IsMsaBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs_field = GetRsField(instr);
  if (opcode == COP1) {
    switch (rs_field) {
      case BZ_V:
      case BZ_B:
      case BZ_H:
      case BZ_W:
      case BZ_D:
      case BNZ_V:
      case BNZ_B:
      case BNZ_H:
      case BNZ_W:
      case BNZ_D:
        return true;
      default:
        return false;
    }
  } else {
    return false;
  }
}

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
    // All three variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}


bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}

bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}

bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsJicOrJialc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  return (opcode == POP66 || opcode == POP76) && rs == 0;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
         (opcode == SPECIAL && rt_field == 0 &&
          ((function_field == JALR) ||
           (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  if (!IsMipsArchVariant(kMips32r6)) {
    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
  } else {
    return GetOpcodeField(instr) == SPECIAL &&
           GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
  }
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL &&
         GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}

bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is an OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}

bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking a non-zero type, use sll(zero_reg, at, type)
  // to avoid use of the mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
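
// For reference: nop() itself is sll(zero_reg, zero_reg, 0), which encodes as
// 0x00000000, while e.g. a type-1 marker nop, sll(zero_reg, at, 1), encodes
// as 0x00010040 (rt == at == r1, sa == 1).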


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (IsMipsArchVariant(kMips32r6)) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}


static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
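
// Worked example: for a 16-bit branch, bits == 16 above, so
// imm = ((instr & 0xFFFF) << 16) >> 14. The left shift parks the offset's
// sign bit at bit 31; the arithmetic right shift then sign-extends and
// multiplies by 4 in one step, turning a word offset into a byte offset.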

uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
  int16_t jic_offset = GetImmediate16(instr_jic);
  int16_t lui_offset = GetImmediate16(instr_lui);

  if (jic_offset < 0) {
    lui_offset += kImm16Mask;
  }
  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;

  return lui_offset_u | jic_offset_u;
}
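
// Worked example: for instr_lui carrying 0x1235 and instr_jic carrying 0xABCD
// (negative as an int16_t), adding kImm16Mask subtracts 1 in 16-bit
// arithmetic, giving 0x1234, so the reassembled address is 0x1234ABCD. This
// exactly undoes the adjustment applied by UnpackTargetAddress() below.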

// Use just lui and jic instructions. Insert the lower part of the target
// address into the jic offset field. Since jic sign-extends its offset and
// then adds it to the register, the lui must load the difference between the
// upper part of the target address and the upper part of the sign-extended
// offset (0xFFFF or 0x0000) into the destination register.
void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
                                    int16_t& jic_offset) {
  lui_offset = (address & kHiMask) >> kLuiShift;
  jic_offset = address & kLoMask;

  if (jic_offset < 0) {
    lui_offset -= kImm16Mask;
  }
}
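
// Worked example: for address == 0x1234ABCD the low half 0xABCD is negative
// as an int16_t, so at run time jic sign-extends it to 0xFFFFABCD.
// Subtracting kImm16Mask (i.e. adding 1 in 16-bit arithmetic) turns the lui
// value 0x1234 into 0x1235, and 0x12350000 + 0xFFFFABCD == 0x1234ABCD.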

void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
                                            uint32_t& lui_offset,
                                            uint32_t& jic_offset) {
  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
  int16_t jic_offset16 = address & kLoMask;

  if (jic_offset16 < 0) {
    lui_offset16 -= kImm16Mask;
  }
  lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
  jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}

int Assembler::target_at(int pos, bool is_internal) {
  Instr instr = instr_at(pos);
  if (is_internal) {
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int delta = static_cast<int>(instr_address - instr);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    int32_t imm32;
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));
    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else {
    DCHECK(IsLui(instr));
    if (IsNal(instr_at(pos + kInstrSize))) {
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      Instr instr1 = instr_at(pos + 0 * kInstrSize);
      Instr instr2 = instr_at(pos + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
        imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
      }

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      } else {
        uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
        int32_t delta = instr_address - imm;
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  }
  return 0;
}


static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK_EQ(imm & 3, 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                              bool is_internal) {
  Instr instr = instr_at(pos);

  if (is_internal) {
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    instr_at_put(pos, imm);
    return;
  }
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to a regular branch with a 16-bit
      // offset.
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        instr_branch_delay = instr_at(pos + 6 * kInstrSize);
      } else {
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos + 0 * kInstrSize, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      instr_at_put(pos + 2 * kInstrSize,
                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
    }
  } else {
    DCHECK(IsLui(instr));
    if (IsNal(instr_at(pos + kInstrSize))) {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to a regular branch-and-link with a 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after jalr from
        // TurboAssembler::BranchAndLinkLong.
        Instr instr_a = ADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;

        instr_at_put(pos + 0 * kInstrSize,
                     instr_lui | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
      }
    } else {
      Instr instr1 = instr_at(pos + 0 * kInstrSize);
      Instr instr2 = instr_at(pos + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
      DCHECK_EQ(imm & 3, 0);
      DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
        instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
      } else {
        instr_at_put(pos + 0 * kInstrSize,
                     instr1 | ((imm & kHiMask) >> kLuiShift));
        instr_at_put(pos + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
      }
    }
  }
}

void Assembler::print(const Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l;
    l.link_to(L->pos());
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}


bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
}


bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
}


int Assembler::BranchOffset(Instr instr) {
  // On pre-R6, and for the remaining R6 branches, the offset is 16 bits.
  int bits = OffsetSize::kOffset16;

  if (IsMipsArchVariant(kMips32r6)) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
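
// Worked example: in the common kOffset16 case this returns
// (1 << 17) - 1 == 131071 bytes, the maximum forward reach once the 16-bit
// word offset is scaled to bytes; BC/BALC with kOffset26 allow
// (1 << 27) - 1 == 134217727 bytes.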


// We have to use a temporary register for values that can be relocated, even
// if they could be encoded in MIPS's 16 bits of immediate-offset instruction
// space, since there is no guarantee that the relocated location can be
// similarly encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) |
      (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
                                  int32_t offset9, int bit6,
                                  SecondaryField func) {
  DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
         is_uint1(bit6));
  Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
                ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
                func;
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
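
// Note: the 26-bit address field of J/JAL addresses words within the current
// 256 MB (2^28-byte) region, so the is_uint26 check above suggests callers
// pass the target byte address already shifted right by 2.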

// MSA instructions
void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
                               uint32_t m, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
  Instr instr = MSA | operation | df | (m << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
                               int32_t imm10, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wd.is_valid() && is_int10(imm10));
  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
                (wd.code() << kWdShift);
  emit(instr);
}

template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
                              RegType t, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
                               uint32_t n, SrcType src, DstType dst) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
  Instr instr = MSA | operation | df | (n << kWtShift) |
                (src.code() << kWsShift) | (dst.code() << kWdShift) |
                MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
                               MSARegister wt, MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  DCHECK_LT(df, 2);
  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
                                Register rs, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
                (rs.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  DCHECK_EQ(imm & 3, 0);

  return imm;
}

uint32_t Assembler::branch_long_offset(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  DCHECK(is_int32(static_cast<int64_t>(target_pos) -
                  static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
  int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
  DCHECK_EQ(offset & 3, 0);

  return offset;
}

int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK_EQ(offset & 3, 0);

  return offset;
}
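
// Worked example (assuming kBranchPCOffset == 4 and no compact-branch pad):
// a branch emitted at pc_offset() == 100 toward a label bound at 132 yields
// offset == 132 - (100 + 4) == 28 bytes, which the caller encodes as a word
// offset of 7 in the instruction.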
1545
1546
label_at_put(Label * L,int at_offset)1547 void Assembler::label_at_put(Label* L, int at_offset) {
1548 int target_pos;
1549 if (L->is_bound()) {
1550 target_pos = L->pos();
1551 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1552 } else {
1553 if (L->is_linked()) {
1554 target_pos = L->pos(); // L's link.
1555 int32_t imm18 = target_pos - at_offset;
1556 DCHECK_EQ(imm18 & 3, 0);
1557 int32_t imm16 = imm18 >> 2;
1558 DCHECK(is_int16(imm16));
1559 instr_at_put(at_offset, (imm16 & kImm16Mask));
1560 } else {
1561 target_pos = kEndOfChain;
1562 instr_at_put(at_offset, 0);
1563 if (!trampoline_emitted_) {
1564 unbound_labels_count_++;
1565 next_buffer_check_ -= kTrampolineSlotsSize;
1566 }
1567 }
1568 L->link_to(at_offset);
1569 }
1570 }
1571
1572
1573 //------- Branch and jump instructions --------
1574
b(int16_t offset)1575 void Assembler::b(int16_t offset) {
1576 beq(zero_reg, zero_reg, offset);
1577 }
1578
1579
bal(int16_t offset)1580 void Assembler::bal(int16_t offset) {
1581 bgezal(zero_reg, offset);
1582 }
1583
1584
bc(int32_t offset)1585 void Assembler::bc(int32_t offset) {
1586 DCHECK(IsMipsArchVariant(kMips32r6));
1587 GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1588 }
1589
1590
balc(int32_t offset)1591 void Assembler::balc(int32_t offset) {
1592 DCHECK(IsMipsArchVariant(kMips32r6));
1593 GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
1594 }
1595
1596
beq(Register rs,Register rt,int16_t offset)1597 void Assembler::beq(Register rs, Register rt, int16_t offset) {
1598 BlockTrampolinePoolScope block_trampoline_pool(this);
1599 GenInstrImmediate(BEQ, rs, rt, offset);
1600 BlockTrampolinePoolFor(1); // For associated delay slot.
1601 }
1602
1603
bgez(Register rs,int16_t offset)1604 void Assembler::bgez(Register rs, int16_t offset) {
1605 BlockTrampolinePoolScope block_trampoline_pool(this);
1606 GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1607 BlockTrampolinePoolFor(1); // For associated delay slot.
1608 }
1609
1610
1611 void Assembler::bgezc(Register rt, int16_t offset) {
1612 DCHECK(IsMipsArchVariant(kMips32r6));
1613 DCHECK(rt != zero_reg);
1614 GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1615 }
1616
1617
1618 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
1619 DCHECK(IsMipsArchVariant(kMips32r6));
1620 DCHECK(rs != zero_reg);
1621 DCHECK(rt != zero_reg);
1622 DCHECK(rs.code() != rt.code());
1623 GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1624 }
1625
1626
1627 void Assembler::bgec(Register rs, Register rt, int16_t offset) {
1628 DCHECK(IsMipsArchVariant(kMips32r6));
1629 DCHECK(rs != zero_reg);
1630 DCHECK(rt != zero_reg);
1631 DCHECK(rs.code() != rt.code());
1632 GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1633 }
1634
1635
1636 void Assembler::bgezal(Register rs, int16_t offset) {
1637 DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
1638 DCHECK(rs != ra);
1639 BlockTrampolinePoolScope block_trampoline_pool(this);
1640 GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1641 BlockTrampolinePoolFor(1); // For associated delay slot.
1642 }
1643
1644
1645 void Assembler::bgtz(Register rs, int16_t offset) {
1646 BlockTrampolinePoolScope block_trampoline_pool(this);
1647 GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1648 BlockTrampolinePoolFor(1); // For associated delay slot.
1649 }
1650
1651
1652 void Assembler::bgtzc(Register rt, int16_t offset) {
1653 DCHECK(IsMipsArchVariant(kMips32r6));
1654 DCHECK(rt != zero_reg);
1655 GenInstrImmediate(BGTZL, zero_reg, rt, offset,
1656 CompactBranchType::COMPACT_BRANCH);
1657 }
1658
1659
1660 void Assembler::blez(Register rs, int16_t offset) {
1661 BlockTrampolinePoolScope block_trampoline_pool(this);
1662 GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1663 BlockTrampolinePoolFor(1); // For associated delay slot.
1664 }
1665
1666
1667 void Assembler::blezc(Register rt, int16_t offset) {
1668 DCHECK(IsMipsArchVariant(kMips32r6));
1669 DCHECK(rt != zero_reg);
1670 GenInstrImmediate(BLEZL, zero_reg, rt, offset,
1671 CompactBranchType::COMPACT_BRANCH);
1672 }
1673
1674
1675 void Assembler::bltzc(Register rt, int16_t offset) {
1676 DCHECK(IsMipsArchVariant(kMips32r6));
1677 DCHECK(rt != zero_reg);
1678 GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1679 }
1680
1681
1682 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1683 DCHECK(IsMipsArchVariant(kMips32r6));
1684 DCHECK(rs != zero_reg);
1685 DCHECK(rt != zero_reg);
1686 DCHECK(rs.code() != rt.code());
1687 GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1688 }
1689
1690
1691 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1692 DCHECK(IsMipsArchVariant(kMips32r6));
1693 DCHECK(rs != zero_reg);
1694 DCHECK(rt != zero_reg);
1695 DCHECK(rs.code() != rt.code());
1696 GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1697 }
1698
1699
1700 void Assembler::bltz(Register rs, int16_t offset) {
1701 BlockTrampolinePoolScope block_trampoline_pool(this);
1702 GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1703 BlockTrampolinePoolFor(1); // For associated delay slot.
1704 }
1705
1706
1707 void Assembler::bltzal(Register rs, int16_t offset) {
1708 DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
1709 DCHECK(rs != ra);
1710 BlockTrampolinePoolScope block_trampoline_pool(this);
1711 GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1712 BlockTrampolinePoolFor(1); // For associated delay slot.
1713 }
1714
1715
1716 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1717 BlockTrampolinePoolScope block_trampoline_pool(this);
1718 GenInstrImmediate(BNE, rs, rt, offset);
1719 BlockTrampolinePoolFor(1); // For associated delay slot.
1720 }
1721
1722
1723 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1724 DCHECK(IsMipsArchVariant(kMips32r6));
1725 if (rs.code() >= rt.code()) {
1726 GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1727 } else {
1728 GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1729 }
1730 }
1731
1732
1733 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1734 DCHECK(IsMipsArchVariant(kMips32r6));
1735 if (rs.code() >= rt.code()) {
1736 GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1737 } else {
1738 GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1739 }
1740 }
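// Note (an illustrative addition): on r6 the ADDI/DADDI opcodes are reused
// for BOVC/BEQC/BEQZALC and BNVC/BNEC/BNEZALC respectively, with the variant
// selected by the relative order of the register fields. Emitting the larger
// register number in the rs field, as bovc/bnvc do above, keeps the encoding
// in the overflow-branch form.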
1741
1742
1743 void Assembler::blezalc(Register rt, int16_t offset) {
1744 DCHECK(IsMipsArchVariant(kMips32r6));
1745 DCHECK(rt != zero_reg);
1746 DCHECK(rt != ra);
1747 GenInstrImmediate(BLEZ, zero_reg, rt, offset,
1748 CompactBranchType::COMPACT_BRANCH);
1749 }
1750
1751
1752 void Assembler::bgezalc(Register rt, int16_t offset) {
1753 DCHECK(IsMipsArchVariant(kMips32r6));
1754 DCHECK(rt != zero_reg);
1755 DCHECK(rt != ra);
1756 GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1757 }
1758
1759
1760 void Assembler::bgezall(Register rs, int16_t offset) {
1761 DCHECK(!IsMipsArchVariant(kMips32r6));
1762 DCHECK(rs != zero_reg);
1763 DCHECK(rs != ra);
1764 BlockTrampolinePoolScope block_trampoline_pool(this);
1765 GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
1766 BlockTrampolinePoolFor(1); // For associated delay slot.
1767 }
1768
1769
1770 void Assembler::bltzalc(Register rt, int16_t offset) {
1771 DCHECK(IsMipsArchVariant(kMips32r6));
1772 DCHECK(rt != zero_reg);
1773 DCHECK(rt != ra);
1774 GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1775 }
1776
1777
1778 void Assembler::bgtzalc(Register rt, int16_t offset) {
1779 DCHECK(IsMipsArchVariant(kMips32r6));
1780 DCHECK(rt != zero_reg);
1781 DCHECK(rt != ra);
1782 GenInstrImmediate(BGTZ, zero_reg, rt, offset,
1783 CompactBranchType::COMPACT_BRANCH);
1784 }
1785
1786
1787 void Assembler::beqzalc(Register rt, int16_t offset) {
1788 DCHECK(IsMipsArchVariant(kMips32r6));
1789 DCHECK(rt != zero_reg);
1790 DCHECK(rt != ra);
1791 GenInstrImmediate(ADDI, zero_reg, rt, offset,
1792 CompactBranchType::COMPACT_BRANCH);
1793 }
1794
1795
1796 void Assembler::bnezalc(Register rt, int16_t offset) {
1797 DCHECK(IsMipsArchVariant(kMips32r6));
1798 DCHECK(rt != zero_reg);
1799 DCHECK(rt != ra);
1800 GenInstrImmediate(DADDI, zero_reg, rt, offset,
1801 CompactBranchType::COMPACT_BRANCH);
1802 }
1803
1804
1805 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1806 DCHECK(IsMipsArchVariant(kMips32r6));
1807 DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1808 if (rs.code() < rt.code()) {
1809 GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1810 } else {
1811 GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1812 }
1813 }
1814
1815
1816 void Assembler::beqzc(Register rs, int32_t offset) {
1817 DCHECK(IsMipsArchVariant(kMips32r6));
1818 DCHECK(rs != zero_reg);
1819 GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
1820 }
1821
1822
1823 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1824 DCHECK(IsMipsArchVariant(kMips32r6));
1825 DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1826 if (rs.code() < rt.code()) {
1827 GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1828 } else {
1829 GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1830 }
1831 }
1832
1833
1834 void Assembler::bnezc(Register rs, int32_t offset) {
1835 DCHECK(IsMipsArchVariant(kMips32r6));
1836 DCHECK(rs != zero_reg);
1837 GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
1838 }
1839
1840
1841 void Assembler::j(int32_t target) {
1842 #ifdef DEBUG
1843 // Get pc of delay slot.
1844 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1845 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1846 (kImm26Bits + kImmFieldShift)) == 0;
1847 DCHECK(in_range && ((target & 3) == 0));
1848 #endif
1849 BlockTrampolinePoolScope block_trampoline_pool(this);
1850 GenInstrJump(J, (target >> 2) & kImm26Mask);
1851 BlockTrampolinePoolFor(1); // For associated delay slot.
1852 }
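// Illustrative range rule (an addition): J and JAL can only reach targets in
// the same 256 MB region as the delay-slot PC, since only the low
// kImm26Bits + kImmFieldShift == 28 address bits are encoded; the in_range
// check above verifies that the upper 4 bits of 'ipc' and 'target' match.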
1853
1854
1855 void Assembler::jr(Register rs) {
1856 if (!IsMipsArchVariant(kMips32r6)) {
1857 BlockTrampolinePoolScope block_trampoline_pool(this);
1858 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1859 BlockTrampolinePoolFor(1); // For associated delay slot.
1860 } else {
1861 jalr(rs, zero_reg);
1862 }
1863 }
1864
1865
1866 void Assembler::jal(int32_t target) {
1867 #ifdef DEBUG
1868 // Get pc of delay slot.
1869 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1870 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1871 (kImm26Bits + kImmFieldShift)) == 0;
1872 DCHECK(in_range && ((target & 3) == 0));
1873 #endif
1874 BlockTrampolinePoolScope block_trampoline_pool(this);
1875 GenInstrJump(JAL, (target >> 2) & kImm26Mask);
1876 BlockTrampolinePoolFor(1); // For associated delay slot.
1877 }
1878
1879
1880 void Assembler::jalr(Register rs, Register rd) {
1881 DCHECK(rs.code() != rd.code());
1882 BlockTrampolinePoolScope block_trampoline_pool(this);
1883 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1884 BlockTrampolinePoolFor(1); // For associated delay slot.
1885 }
1886
1887
1888 void Assembler::jic(Register rt, int16_t offset) {
1889 DCHECK(IsMipsArchVariant(kMips32r6));
1890 GenInstrImmediate(POP66, zero_reg, rt, offset);
1891 }
1892
1893
1894 void Assembler::jialc(Register rt, int16_t offset) {
1895 DCHECK(IsMipsArchVariant(kMips32r6));
1896 GenInstrImmediate(POP76, zero_reg, rt, offset);
1897 }
1898
1899
1900 // -------Data-processing-instructions---------
1901
1902 // Arithmetic.
1903
1904 void Assembler::addu(Register rd, Register rs, Register rt) {
1905 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1906 }
1907
1908
1909 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1910 GenInstrImmediate(ADDIU, rs, rd, j);
1911 }
1912
1913
1914 void Assembler::subu(Register rd, Register rs, Register rt) {
1915 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1916 }
1917
1918
1919 void Assembler::mul(Register rd, Register rs, Register rt) {
1920 if (!IsMipsArchVariant(kMips32r6)) {
1921 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1922 } else {
1923 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1924 }
1925 }
1926
1927
1928 void Assembler::mulu(Register rd, Register rs, Register rt) {
1929 DCHECK(IsMipsArchVariant(kMips32r6));
1930 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1931 }
1932
1933
1934 void Assembler::muh(Register rd, Register rs, Register rt) {
1935 DCHECK(IsMipsArchVariant(kMips32r6));
1936 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1937 }
1938
1939
1940 void Assembler::muhu(Register rd, Register rs, Register rt) {
1941 DCHECK(IsMipsArchVariant(kMips32r6));
1942 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1943 }
1944
1945
1946 void Assembler::mod(Register rd, Register rs, Register rt) {
1947 DCHECK(IsMipsArchVariant(kMips32r6));
1948 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1949 }
1950
1951
1952 void Assembler::modu(Register rd, Register rs, Register rt) {
1953 DCHECK(IsMipsArchVariant(kMips32r6));
1954 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1955 }
1956
1957
1958 void Assembler::mult(Register rs, Register rt) {
1959 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1960 }
1961
1962
1963 void Assembler::multu(Register rs, Register rt) {
1964 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1965 }
1966
1967
1968 void Assembler::div(Register rs, Register rt) {
1969 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1970 }
1971
1972
1973 void Assembler::div(Register rd, Register rs, Register rt) {
1974 DCHECK(IsMipsArchVariant(kMips32r6));
1975 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1976 }
1977
1978
1979 void Assembler::divu(Register rs, Register rt) {
1980 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1981 }
1982
1983
1984 void Assembler::divu(Register rd, Register rs, Register rt) {
1985 DCHECK(IsMipsArchVariant(kMips32r6));
1986 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1987 }
1988
1989
1990 // Logical.
1991
1992 void Assembler::and_(Register rd, Register rs, Register rt) {
1993 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1994 }
1995
1996
1997 void Assembler::andi(Register rt, Register rs, int32_t j) {
1998 DCHECK(is_uint16(j));
1999 GenInstrImmediate(ANDI, rs, rt, j);
2000 }
2001
2002
2003 void Assembler::or_(Register rd, Register rs, Register rt) {
2004 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
2005 }
2006
2007
2008 void Assembler::ori(Register rt, Register rs, int32_t j) {
2009 DCHECK(is_uint16(j));
2010 GenInstrImmediate(ORI, rs, rt, j);
2011 }
2012
2013
2014 void Assembler::xor_(Register rd, Register rs, Register rt) {
2015 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
2016 }
2017
2018
2019 void Assembler::xori(Register rt, Register rs, int32_t j) {
2020 DCHECK(is_uint16(j));
2021 GenInstrImmediate(XORI, rs, rt, j);
2022 }
2023
2024
2025 void Assembler::nor(Register rd, Register rs, Register rt) {
2026 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
2027 }
2028
2029
2030 // Shifts.
2031 void Assembler::sll(Register rd,
2032 Register rt,
2033 uint16_t sa,
2034 bool coming_from_nop) {
2035 // Don't allow nops of the form 'sll zero_reg, zero_reg, 0' to be
2036 // emitted directly through this method; they must be generated via
2037 // nop(int/NopMarkerTypes).
2038 DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg));
2039 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
2040 }
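// Usage sketch (illustrative; register choices are hypothetical):
//   __ sll(t0, a1, 2);             // t0 = a1 * 4 (scale a word index).
//   __ addu(t0, a0, t0);           // t0 = a0 + a1 * 4.
//   __ lw(v0, MemOperand(t0, 0));  // Load the a1-th word of the array at a0.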
2041
2042
2043 void Assembler::sllv(Register rd, Register rt, Register rs) {
2044 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
2045 }
2046
2047
2048 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
2049 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
2050 }
2051
2052
2053 void Assembler::srlv(Register rd, Register rt, Register rs) {
2054 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
2055 }
2056
2057
2058 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
2059 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
2060 }
2061
2062
2063 void Assembler::srav(Register rd, Register rt, Register rs) {
2064 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
2065 }
2066
2067
2068 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
2069 // Should be called via MacroAssembler::Ror.
2070 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2071 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2072 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
2073 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
2074 emit(instr);
2075 }
2076
2077
2078 void Assembler::rotrv(Register rd, Register rt, Register rs) {
2079 // Should be called via MacroAssembler::Ror.
2080 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2081 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2082 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
2083 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
2084 emit(instr);
2085 }
2086
2087
2088 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
2089 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2090 DCHECK_LE(sa, 3);
2091 DCHECK(IsMipsArchVariant(kMips32r6));
2092 Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2093 rd.code() << kRdShift | sa << kSaShift | LSA;
2094 emit(instr);
2095 }
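// Illustrative semantics (assuming r6 LSA behavior, where the hardware
// applies a left shift of sa + 1): lsa(t0, a0, a1, 1) would compute
// t0 = (a1 << 2) + a0, i.e. a scaled-index address in one instruction.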
2096
2097
2098 // ------------Memory-instructions-------------
2099
2100 void Assembler::AdjustBaseAndOffset(MemOperand& src,
2101 OffsetAccessType access_type,
2102 int second_access_add_to_offset) {
2103 // This method is used to adjust the base register and offset pair
2104 // for a load/store when the offset doesn't fit into int16_t.
2105 // It is assumed that 'base + offset' is sufficiently aligned for memory
2106 // operands that are machine word in size or smaller. For doubleword-sized
2107 // operands it's assumed that 'base' is a multiple of 8, while 'offset'
2108 // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
2109 // and spilled variables on the stack accessed relative to the stack
2110 // pointer register).
2111 // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
2112
2113 bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
2114 bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
2115 DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
2116
2117 // is_int16 must be passed a signed value, hence the static cast below.
2118 if (is_int16(src.offset()) &&
2119 (!two_accesses || is_int16(static_cast<int32_t>(
2120 src.offset() + second_access_add_to_offset)))) {
2121 // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
2122 // value) fits into int16_t.
2123 return;
2124 }
2125 UseScratchRegisterScope temps(this);
2126 Register scratch = temps.Acquire();
2127 DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
2128 // while loading 'offset'.
2129
2130 #ifdef DEBUG
2131 // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
2132 uint32_t misalignment = src.offset() & (kDoubleSize - 1);
2133 #endif
2134
2135 // Do not load the whole 32-bit 'offset' if it can be represented as
2136 // a sum of two 16-bit signed offsets. This can save an instruction or two.
2137 // To simplify matters, only do this for a symmetric range of offsets from
2138 // about -64KB to about +64KB, allowing further addition of 4 when accessing
2139 // 64-bit variables with two 32-bit accesses.
2140 constexpr int32_t kMinOffsetForSimpleAdjustment =
2141 0x7FF8; // Max int16_t that's a multiple of 8.
2142 constexpr int32_t kMaxOffsetForSimpleAdjustment =
2143 2 * kMinOffsetForSimpleAdjustment;
2144 if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
2145 addiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
2146 src.offset_ -= kMinOffsetForSimpleAdjustment;
2147 } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
2148 src.offset() < 0) {
2149 addiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
2150 src.offset_ += kMinOffsetForSimpleAdjustment;
2151 } else if (IsMipsArchVariant(kMips32r6)) {
2152 // On r6 take advantage of the aui instruction, e.g.:
2153 // aui at, base, offset_high
2154 // lw reg_lo, offset_low(at)
2155 // lw reg_hi, (offset_low+4)(at)
2156 // or when offset_low+4 overflows int16_t:
2157 // aui at, base, offset_high
2158 // addiu at, at, 8
2159 // lw reg_lo, (offset_low-8)(at)
2160 // lw reg_hi, (offset_low-4)(at)
2161 int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
2162 int16_t offset_low = static_cast<uint16_t>(src.offset());
2163 offset_high += (offset_low < 0)
2164 ? 1
2165 : 0; // Account for offset sign extension in load/store.
2166 aui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
2167 if (two_accesses && !is_int16(static_cast<int32_t>(
2168 offset_low + second_access_add_to_offset))) {
2169 // Avoid overflow in the 16-bit offset of the load/store instruction when
2170 // adding 4.
2171 addiu(scratch, scratch, kDoubleSize);
2172 offset_low -= kDoubleSize;
2173 }
2174 src.offset_ = offset_low;
2175 } else {
2176 // Do not load the whole 32-bit 'offset' if it can be represented as
2177 // a sum of three 16-bit signed offsets. This can save an instruction.
2178 // To simplify matters, only do this for a symmetric range of offsets from
2179 // about -96KB to about +96KB, allowing further addition of 4 when accessing
2180 // 64-bit variables with two 32-bit accesses.
2181 constexpr int32_t kMinOffsetForMediumAdjustment =
2182 2 * kMinOffsetForSimpleAdjustment;
2183 constexpr int32_t kMaxOffsetForMediumAdjustment =
2184 3 * kMinOffsetForSimpleAdjustment;
2185 if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
2186 addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
2187 addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
2188 src.offset_ -= kMinOffsetForMediumAdjustment;
2189 } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
2190 src.offset() < 0) {
2191 addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
2192 addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
2193 src.offset_ += kMinOffsetForMediumAdjustment;
2194 } else {
2195 // Now that all shorter options have been exhausted, load the full 32-bit
2196 // offset.
2197 int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
2198 lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
2199 ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
2200 addu(scratch, scratch, src.rm());
2201 src.offset_ -= loaded_offset;
2202 }
2203 }
2204 src.rm_ = scratch;
2205
2206 DCHECK(is_int16(src.offset()));
2207 if (two_accesses) {
2208 DCHECK(is_int16(
2209 static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
2210 }
2211 DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
2212 }
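// Worked example (an illustrative addition, assuming a pre-r6 variant): for
// lw(v0, MemOperand(a0, 0x12340)), the offset neither fits in int16_t nor in
// the simple range (0x12340 > kMaxOffsetForSimpleAdjustment == 0xFFF0), but
// it is within kMaxOffsetForMediumAdjustment == 0x17FE8, so the code above
// emits:
//   addiu(scratch, a0, 0x7FF8);
//   addiu(scratch, scratch, 0x7FF8);
// and rewrites the operand to MemOperand(scratch, 0x2350), since
// 0x12340 - 0xFFF0 == 0x2350 fits in int16_t.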
2213
2214 void Assembler::lb(Register rd, const MemOperand& rs) {
2215 MemOperand source = rs;
2216 AdjustBaseAndOffset(source);
2217 GenInstrImmediate(LB, source.rm(), rd, source.offset());
2218 }
2219
2220
2221 void Assembler::lbu(Register rd, const MemOperand& rs) {
2222 MemOperand source = rs;
2223 AdjustBaseAndOffset(source);
2224 GenInstrImmediate(LBU, source.rm(), rd, source.offset());
2225 }
2226
2227
2228 void Assembler::lh(Register rd, const MemOperand& rs) {
2229 MemOperand source = rs;
2230 AdjustBaseAndOffset(source);
2231 GenInstrImmediate(LH, source.rm(), rd, source.offset());
2232 }
2233
2234
2235 void Assembler::lhu(Register rd, const MemOperand& rs) {
2236 MemOperand source = rs;
2237 AdjustBaseAndOffset(source);
2238 GenInstrImmediate(LHU, source.rm(), rd, source.offset());
2239 }
2240
2241
2242 void Assembler::lw(Register rd, const MemOperand& rs) {
2243 MemOperand source = rs;
2244 AdjustBaseAndOffset(source);
2245 GenInstrImmediate(LW, source.rm(), rd, source.offset());
2246 }
2247
2248
2249 void Assembler::lwl(Register rd, const MemOperand& rs) {
2250 DCHECK(is_int16(rs.offset_));
2251 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2252 IsMipsArchVariant(kMips32r2));
2253 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
2254 }
2255
2256
2257 void Assembler::lwr(Register rd, const MemOperand& rs) {
2258 DCHECK(is_int16(rs.offset_));
2259 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2260 IsMipsArchVariant(kMips32r2));
2261 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
2262 }
2263
2264
2265 void Assembler::sb(Register rd, const MemOperand& rs) {
2266 MemOperand source = rs;
2267 AdjustBaseAndOffset(source);
2268 GenInstrImmediate(SB, source.rm(), rd, source.offset());
2269 }
2270
2271
2272 void Assembler::sh(Register rd, const MemOperand& rs) {
2273 MemOperand source = rs;
2274 AdjustBaseAndOffset(source);
2275 GenInstrImmediate(SH, source.rm(), rd, source.offset());
2276 }
2277
2278
2279 void Assembler::sw(Register rd, const MemOperand& rs) {
2280 MemOperand source = rs;
2281 AdjustBaseAndOffset(source);
2282 GenInstrImmediate(SW, source.rm(), rd, source.offset());
2283 }
2284
2285
2286 void Assembler::swl(Register rd, const MemOperand& rs) {
2287 DCHECK(is_int16(rs.offset_));
2288 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2289 IsMipsArchVariant(kMips32r2));
2290 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
2291 }
2292
2293
2294 void Assembler::swr(Register rd, const MemOperand& rs) {
2295 DCHECK(is_int16(rs.offset_));
2296 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2297 IsMipsArchVariant(kMips32r2));
2298 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
2299 }
2300
2301 void Assembler::ll(Register rd, const MemOperand& rs) {
2302 if (IsMipsArchVariant(kMips32r6)) {
2303 DCHECK(is_int9(rs.offset_));
2304 GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
2305 } else {
2306 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2307 IsMipsArchVariant(kMips32r2));
2308 DCHECK(is_int16(rs.offset_));
2309 GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
2310 }
2311 }
2312
2313 void Assembler::sc(Register rd, const MemOperand& rs) {
2314 if (IsMipsArchVariant(kMips32r6)) {
2315 DCHECK(is_int9(rs.offset_));
2316 GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
2317 } else {
2318 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2319 IsMipsArchVariant(kMips32r2));
2320 GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
2321 }
2322 }
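// Usage sketch (illustrative, MacroAssembler-style; 'retry' is a hypothetical
// label): an atomic add built from the classic ll/sc loop.
//   __ bind(&retry);
//   __ ll(t0, MemOperand(a0, 0));   // Load-linked the current value.
//   __ addu(t1, t0, a1);            // Compute the updated value.
//   __ sc(t1, MemOperand(a0, 0));   // Store-conditional; t1 = 1 on success.
//   __ beq(t1, zero_reg, &retry);   // Retry if the store failed.
//   __ nop();                       // Branch delay slot.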
2323
2324 void Assembler::lui(Register rd, int32_t j) {
2325 DCHECK(is_uint16(j) || is_int16(j));
2326 GenInstrImmediate(LUI, zero_reg, rd, j);
2327 }
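// Usage sketch (illustrative): materializing an arbitrary 32-bit constant
// with the same lui/ori pair that AdjustBaseAndOffset uses above:
//   __ lui(t0, (imm >> kLuiShift) & kImm16Mask);  // Upper 16 bits.
//   __ ori(t0, t0, imm & kImm16Mask);             // Lower 16 bits.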
2328
2329
2330 void Assembler::aui(Register rt, Register rs, int32_t j) {
2331 // This instruction uses the same opcode as 'lui'; the difference in
2332 // encoding is that 'lui' has zero_reg in the rs field.
2333 DCHECK(IsMipsArchVariant(kMips32r6));
2334 DCHECK(rs != zero_reg);
2335 DCHECK(is_uint16(j));
2336 GenInstrImmediate(LUI, rs, rt, j);
2337 }
2338
2339 // ---------PC-Relative instructions-----------
2340
2341 void Assembler::addiupc(Register rs, int32_t imm19) {
2342 DCHECK(IsMipsArchVariant(kMips32r6));
2343 DCHECK(rs.is_valid() && is_int19(imm19));
2344 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
2345 GenInstrImmediate(PCREL, rs, imm21);
2346 }
2347
2348
2349 void Assembler::lwpc(Register rs, int32_t offset19) {
2350 DCHECK(IsMipsArchVariant(kMips32r6));
2351 DCHECK(rs.is_valid() && is_int19(offset19));
2352 uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
2353 GenInstrImmediate(PCREL, rs, imm21);
2354 }
2355
2356
2357 void Assembler::auipc(Register rs, int16_t imm16) {
2358 DCHECK(IsMipsArchVariant(kMips32r6));
2359 DCHECK(rs.is_valid());
2360 uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
2361 GenInstrImmediate(PCREL, rs, imm21);
2362 }
2363
2364
2365 void Assembler::aluipc(Register rs, int16_t imm16) {
2366 DCHECK(IsMipsArchVariant(kMips32r6));
2367 DCHECK(rs.is_valid());
2368 uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
2369 GenInstrImmediate(PCREL, rs, imm21);
2370 }
2371
2372
2373 // -------------Misc-instructions--------------
2374
2375 // Break / Trap instructions.
2376 void Assembler::break_(uint32_t code, bool break_as_stop) {
2377 DCHECK_EQ(code & ~0xFFFFF, 0);
2378 // Break codes that fall in the stop range must be flagged via
2379 // 'break_as_stop', because the simulator expects a char pointer after a
2380 // stop instruction. See constants-mips.h for an explanation.
2381 DCHECK((break_as_stop &&
2382 code <= kMaxStopCode &&
2383 code > kMaxWatchpointCode) ||
2384 (!break_as_stop &&
2385 (code > kMaxStopCode ||
2386 code <= kMaxWatchpointCode)));
2387 Instr break_instr = SPECIAL | BREAK | (code << 6);
2388 emit(break_instr);
2389 }
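// Illustrative encoding (an addition): break_(0x54321) places the 20-bit
// code in bits 25..6 of the instruction word, i.e.
// SPECIAL | (0x54321 << 6) | BREAK.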
2390
2391
2392 void Assembler::stop(const char* msg, uint32_t code) {
2393 DCHECK_GT(code, kMaxWatchpointCode);
2394 DCHECK_LE(code, kMaxStopCode);
2395 #if V8_HOST_ARCH_MIPS
2396 break_(0x54321);
2397 #else // V8_HOST_ARCH_MIPS
2398 break_(code, true);
2399 #endif
2400 }
2401
2402
2403 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2404 DCHECK(is_uint10(code));
2405 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2406 | rt.code() << kRtShift | code << 6;
2407 emit(instr);
2408 }
2409
2410
2411 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2412 DCHECK(is_uint10(code));
2413 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2414 | rt.code() << kRtShift | code << 6;
2415 emit(instr);
2416 }
2417
2418
2419 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2420 DCHECK(is_uint10(code));
2421 Instr instr =
2422 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2423 emit(instr);
2424 }
2425
2426
2427 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2428 DCHECK(is_uint10(code));
2429 Instr instr =
2430 SPECIAL | TLTU | rs.code() << kRsShift
2431 | rt.code() << kRtShift | code << 6;
2432 emit(instr);
2433 }
2434
2435
2436 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2437 DCHECK(is_uint10(code));
2438 Instr instr =
2439 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2440 emit(instr);
2441 }
2442
2443
2444 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2445 DCHECK(is_uint10(code));
2446 Instr instr =
2447 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2448 emit(instr);
2449 }
2450
2451 void Assembler::sync() {
2452 Instr sync_instr = SPECIAL | SYNC;
2453 emit(sync_instr);
2454 }
2455
2456 // Move from HI/LO register.
2457
2458 void Assembler::mfhi(Register rd) {
2459 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2460 }
2461
2462
2463 void Assembler::mflo(Register rd) {
2464 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2465 }
2466
2467
2468 // Set on less than instructions.
2469 void Assembler::slt(Register rd, Register rs, Register rt) {
2470 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2471 }
2472
2473
2474 void Assembler::sltu(Register rd, Register rs, Register rt) {
2475 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2476 }
2477
2478
2479 void Assembler::slti(Register rt, Register rs, int32_t j) {
2480 GenInstrImmediate(SLTI, rs, rt, j);
2481 }
2482
2483
2484 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2485 GenInstrImmediate(SLTIU, rs, rt, j);
2486 }
2487
2488
2489 // Conditional move.
2490 void Assembler::movz(Register rd, Register rs, Register rt) {
2491 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2492 }
2493
2494
2495 void Assembler::movn(Register rd, Register rs, Register rt) {
2496 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2497 }
2498
2499
2500 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2501 Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
2502 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2503 }
2504
2505
2506 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2507 Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
2508 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2509 }
2510
2511
2512 void Assembler::seleqz(Register rd, Register rs, Register rt) {
2513 DCHECK(IsMipsArchVariant(kMips32r6));
2514 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2515 }
2516
2517
2518 // Bit twiddling.
2519 void Assembler::clz(Register rd, Register rs) {
2520 if (!IsMipsArchVariant(kMips32r6)) {
2521 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
2522 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2523 } else {
2524 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2525 }
2526 }
2527
2528
2529 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2530 // Should be called via MacroAssembler::Ins.
2531 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2532 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2533 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2534 }
2535
2536
2537 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2538 // Should be called via MacroAssembler::Ext.
2539 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2540 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2541 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2542 }
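// Illustrative example (an addition): ext_(t0, t1, 8, 4) extracts bits 11..8
// of t1 into the low bits of t0, while ins_(t0, t1, 8, 4) deposits the low 4
// bits of t1 into bits 11..8 of t0. Note the encodings above differ: INS
// stores msb = pos + size - 1, EXT stores msbd = size - 1.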
2543
2544
2545 void Assembler::bitswap(Register rd, Register rt) {
2546 DCHECK(IsMipsArchVariant(kMips32r6));
2547 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2548 }
2549
2550
2551 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2552 DCHECK(!IsMipsArchVariant(kLoongson));
2553 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2554 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2555 | (rs.offset_);
2556 emit(instr);
2557 }
2558
2559
2560 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2561 DCHECK(IsMipsArchVariant(kMips32r6));
2562 DCHECK(is_uint3(bp));
2563 uint16_t sa = (ALIGN << kBp2Bits) | bp;
2564 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2565 }
2566
2567 // Byte swap.
2568 void Assembler::wsbh(Register rd, Register rt) {
2569 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2570 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2571 }
2572
2573 void Assembler::seh(Register rd, Register rt) {
2574 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2575 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2576 }
2577
2578 void Assembler::seb(Register rd, Register rt) {
2579 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2580 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2581 }
2582
2583 // --------Coprocessor-instructions----------------
2584
2585 // Load, store, move.
2586 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2587 MemOperand tmp = src;
2588 AdjustBaseAndOffset(tmp);
2589 GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
2590 }
2591
2592
2593 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2594 MemOperand tmp = src;
2595 AdjustBaseAndOffset(tmp);
2596 GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
2597 }
2598
2599
2600 void Assembler::mtc1(Register rt, FPURegister fs) {
2601 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2602 }
2603
2604
2605 void Assembler::mthc1(Register rt, FPURegister fs) {
2606 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2607 }
2608
2609
2610 void Assembler::mfc1(Register rt, FPURegister fs) {
2611 GenInstrRegister(COP1, MFC1, rt, fs, f0);
2612 }
2613
2614
2615 void Assembler::mfhc1(Register rt, FPURegister fs) {
2616 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2617 }
2618
2619
2620 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2621 GenInstrRegister(COP1, CTC1, rt, fs);
2622 }
2623
2624
2625 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2626 GenInstrRegister(COP1, CFC1, rt, fs);
2627 }
2628
2629
2630 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
2631 DCHECK(!IsMipsArchVariant(kMips32r6));
2632 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
2633 }
2634
2635
2636 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
2637 DCHECK(!IsMipsArchVariant(kMips32r6));
2638 GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
2639 }
2640
2641
2642 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2643 FPURegister ft) {
2644 DCHECK(IsMipsArchVariant(kMips32r6));
2645 DCHECK((fmt == D) || (fmt == S));
2646
2647 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2648 }
2649
2650
2651 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2652 sel(S, fd, fs, ft);
2653 }
2654
2655
2656 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2657 sel(D, fd, fs, ft);
2658 }
2659
2660
2661 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2662 FPURegister ft) {
2663 DCHECK(IsMipsArchVariant(kMips32r6));
2664 DCHECK((fmt == D) || (fmt == S));
2665 GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2666 }
2667
2668
2669 void Assembler::selnez(Register rd, Register rs, Register rt) {
2670 DCHECK(IsMipsArchVariant(kMips32r6));
2671 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2672 }
2673
2674
2675 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
2676 FPURegister ft) {
2677 DCHECK(IsMipsArchVariant(kMips32r6));
2678 DCHECK((fmt == D) || (fmt == S));
2679 GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
2680 }
2681
2682
2683 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2684 seleqz(D, fd, fs, ft);
2685 }
2686
2687
2688 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2689 seleqz(S, fd, fs, ft);
2690 }
2691
2692
2693 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2694 selnez(D, fd, fs, ft);
2695 }
2696
2697
2698 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2699 selnez(S, fd, fs, ft);
2700 }
2701
2702
2703 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
2704 DCHECK(!IsMipsArchVariant(kMips32r6));
2705 GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
2706 }
2707
2708
2709 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
2710 DCHECK(!IsMipsArchVariant(kMips32r6));
2711 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
2712 }
2713
2714
2715 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2716 DCHECK(!IsMipsArchVariant(kMips32r6));
2717 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
2718 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2719 }
2720
2721
2722 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2723 DCHECK(!IsMipsArchVariant(kMips32r6));
2724 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
2725 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2726 }
2727
2728
2729 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2730 DCHECK(!IsMipsArchVariant(kMips32r6));
2731 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2732 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2733 }
2734
2735
2736 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2737 DCHECK(!IsMipsArchVariant(kMips32r6));
2738 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2739 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2740 }
2741
2742
2743 // Arithmetic.
2744
2745 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2746 GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
2747 }
2748
2749
2750 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2751 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2752 }
2753
2754
2755 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2756 GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
2757 }
2758
2759
2760 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2761 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2762 }
2763
2764
2765 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2766 GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
2767 }
2768
2769
2770 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2771 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2772 }
2773
2774 void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2775 FPURegister ft) {
2776 DCHECK(IsMipsArchVariant(kMips32r2));
2777 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
2778 }
2779
2780 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2781 FPURegister ft) {
2782 DCHECK(IsMipsArchVariant(kMips32r2));
2783 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2784 }
2785
2786 void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2787 FPURegister ft) {
2788 DCHECK(IsMipsArchVariant(kMips32r2));
2789 GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
2790 }
2791
2792 void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2793 FPURegister ft) {
2794 DCHECK(IsMipsArchVariant(kMips32r2));
2795 GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
2796 }
2797
2798 void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2799 DCHECK(IsMipsArchVariant(kMips32r6));
2800 GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
2801 }
2802
2803 void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2804 DCHECK(IsMipsArchVariant(kMips32r6));
2805 GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
2806 }
2807
2808 void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2809 DCHECK(IsMipsArchVariant(kMips32r6));
2810 GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
2811 }
2812
2813 void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2814 DCHECK(IsMipsArchVariant(kMips32r6));
2815 GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
2816 }
2817
2818 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2819 GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
2820 }
2821
2822
2823 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2824 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2825 }
2826
2827
2828 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2829 GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
2830 }
2831
2832
2833 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2834 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2835 }
2836
2837
2838 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2839 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2840 }
2841
2842
2843 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2844 GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2845 }
2846
2847
2848 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2849 GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
2850 }
2851
2852
2853 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2854 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2855 }
2856
2857
2858 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2859 GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
2860 }
2861
2862
2863 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2864 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2865 }
2866
2867
2868 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2869 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2870 GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2871 }
2872
2873
2874 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2875 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2876 GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2877 }
2878
2879
2880 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2881 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2882 GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2883 }
2884
2885
2886 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2887 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2888 GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2889 }
2890
2891
2892 // Conversions.
2893
2894 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2895 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2896 }
2897
2898
2899 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2900 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2901 }
2902
2903
2904 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2905 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2906 }
2907
2908
2909 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2910 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2911 }
2912
2913
2914 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2915 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2916 }
2917
2918
2919 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2920 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2921 }
2922
2923
2924 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2925 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2926 }
2927
2928
2929 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2930 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2931 }
2932
2933
2934 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2935 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2936 }
2937
2938
2939 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2940 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2941 }
2942
2943
2944 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2945
2946
2947 void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
2948 DCHECK(IsMipsArchVariant(kMips32r6));
2949 DCHECK((fmt == D) || (fmt == S));
2950 GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
2951 }
2952
2953
2954 void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2955
2956
2957 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
2958 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2959 IsFp64Mode());
2960 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
2961 }
2962
2963
2964 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
2965 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2966 IsFp64Mode());
2967 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
2968 }
2969
2970
2971 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
2972 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2973 IsFp64Mode());
2974 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
2975 }
2976
2977
2978 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
2979 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2980 IsFp64Mode());
2981 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
2982 }
2983
2984
2985 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
2986 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2987 IsFp64Mode());
2988 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
2989 }
2990
2991
2992 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
2993 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2994 IsFp64Mode());
2995 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
2996 }
2997
2998
2999 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
3000 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3001 IsFp64Mode());
3002 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
3003 }
3004
3005
3006 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
3007 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3008 IsFp64Mode());
3009 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
3010 }
3011
3012
3013 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
3014 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3015 IsFp64Mode());
3016 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
3017 }
3018
3019
3020 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
3021 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3022 IsFp64Mode());
3023 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
3024 }
3025
3026
3027 void Assembler::class_s(FPURegister fd, FPURegister fs) {
3028 DCHECK(IsMipsArchVariant(kMips32r6));
3029 GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
3030 }
3031
3032
3033 void Assembler::class_d(FPURegister fd, FPURegister fs) {
3034 DCHECK(IsMipsArchVariant(kMips32r6));
3035 GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
3036 }
3037
3038
3039 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
3040 FPURegister ft) {
3041 DCHECK(IsMipsArchVariant(kMips32r6));
3042 DCHECK((fmt == D) || (fmt == S));
3043 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
3044 }
3045
3046
3047 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
3048 FPURegister ft) {
3049 DCHECK(IsMipsArchVariant(kMips32r6));
3050 DCHECK((fmt == D) || (fmt == S));
3051 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
3052 }
3053
3054
3055 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
3056 FPURegister ft) {
3057 DCHECK(IsMipsArchVariant(kMips32r6));
3058 DCHECK((fmt == D) || (fmt == S));
3059 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
3060 }
3061
3062
3063 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
3064 FPURegister ft) {
3065 DCHECK(IsMipsArchVariant(kMips32r6));
3066 DCHECK((fmt == D) || (fmt == S));
3067 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
3068 }
3069
3070
3071 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3072 min(S, fd, fs, ft);
3073 }
3074
3075
3076 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3077 min(D, fd, fs, ft);
3078 }
3079
3080
3081 void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3082 max(S, fd, fs, ft);
3083 }
3084
3085
3086 void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3087 max(D, fd, fs, ft);
3088 }
3089
3090
3091 void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3092 mina(S, fd, fs, ft);
3093 }
3094
3095
mina_d(FPURegister fd,FPURegister fs,FPURegister ft)3096 void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3097 mina(D, fd, fs, ft);
3098 }
3099
3100
maxa_s(FPURegister fd,FPURegister fs,FPURegister ft)3101 void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3102 maxa(S, fd, fs, ft);
3103 }
3104
3105
maxa_d(FPURegister fd,FPURegister fs,FPURegister ft)3106 void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3107 maxa(D, fd, fs, ft);
3108 }
3109
3110
cvt_s_w(FPURegister fd,FPURegister fs)3111 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
3112 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
3113 }
3114
3115
cvt_s_l(FPURegister fd,FPURegister fs)3116 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
3117 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3118 IsFp64Mode());
3119 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
3120 }
3121
3122
cvt_s_d(FPURegister fd,FPURegister fs)3123 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
3124 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
3125 }
3126
3127
cvt_d_w(FPURegister fd,FPURegister fs)3128 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
3129 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
3130 }
3131
3132
cvt_d_l(FPURegister fd,FPURegister fs)3133 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
3134 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3135 IsFp64Mode());
3136 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
3137 }
3138
3139
cvt_d_s(FPURegister fd,FPURegister fs)3140 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
3141 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
3142 }
3143
3144
3145 // Conditions for >= MIPSr6.
cmp(FPUCondition cond,SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)3146 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
3147 FPURegister fd, FPURegister fs, FPURegister ft) {
3148 DCHECK(IsMipsArchVariant(kMips32r6));
3149 DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3150 Instr instr = COP1 | fmt | ft.code() << kFtShift |
3151 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
3152 emit(instr);
3153 }
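
// Illustrative usage sketch (register choices are assumptions, not from the
// original source): on r6,
//   cmp_d(EQ, f2, f4, f6);
// emits CMP.EQ.D, writing an all-ones or all-zeros mask into f2 depending on
// whether f4 == f6; bc1eqz/bc1nez below branch on bit 0 of such a mask.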


void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}


void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
                  FPURegister ft, uint16_t cc) {
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift |
                cc << 8 | 3 << 4 | cond;
  emit(instr);
}


void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}


void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
  DCHECK_EQ(src2, 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// ---------- MSA instructions ------------
#define MSA_BRANCH_LIST(V) \
  V(bz_v, BZ_V)            \
  V(bz_b, BZ_B)            \
  V(bz_h, BZ_H)            \
  V(bz_w, BZ_W)            \
  V(bz_d, BZ_D)            \
  V(bnz_v, BNZ_V)          \
  V(bnz_b, BNZ_B)          \
  V(bnz_h, BNZ_H)          \
  V(bnz_w, BNZ_W)          \
  V(bnz_d, BNZ_D)

#define MSA_BRANCH(name, opcode)                         \
  void Assembler::name(MSARegister wt, int16_t offset) { \
    GenInstrMsaBranch(opcode, wt, offset);               \
  }

MSA_BRANCH_LIST(MSA_BRANCH)
#undef MSA_BRANCH
#undef MSA_BRANCH_LIST
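
// For reference, each MSA_BRANCH_LIST entry expands mechanically; e.g. the
// (bz_v, BZ_V) entry above defines:
//
//   void Assembler::bz_v(MSARegister wt, int16_t offset) {
//     GenInstrMsaBranch(BZ_V, wt, offset);
//   }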

#define MSA_LD_ST_LIST(V) \
  V(ld_b, LD_B)           \
  V(ld_h, LD_H)           \
  V(ld_w, LD_W)           \
  V(ld_d, LD_D)           \
  V(st_b, ST_B)           \
  V(st_h, ST_H)           \
  V(st_w, ST_W)           \
  V(st_d, ST_D)

#define MSA_LD_ST(name, opcode)                                  \
  void Assembler::name(MSARegister wd, const MemOperand& rs) {   \
    MemOperand source = rs;                                      \
    AdjustBaseAndOffset(source);                                 \
    if (is_int10(source.offset())) {                             \
      GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
    } else {                                                     \
      UseScratchRegisterScope temps(this);                       \
      Register scratch = temps.Acquire();                        \
      DCHECK(rs.rm() != scratch);                                \
      addiu(scratch, source.rm(), source.offset());              \
      GenInstrMsaMI10(opcode, 0, scratch, wd);                   \
    }                                                            \
  }

MSA_LD_ST_LIST(MSA_LD_ST)
#undef MSA_LD_ST
#undef MSA_LD_ST_LIST
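
// Illustrative usage sketch (offsets chosen for illustration, not from the
// original source):
//   ld_b(w0, MemOperand(a0, 4));     // fits the signed 10-bit MI10 field
//   ld_b(w0, MemOperand(a0, 4096));  // too wide: addiu into a scratch first
// AdjustBaseAndOffset may already have folded part of the offset into the
// base register before this choice is made.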

#define MSA_I10_LIST(V) \
  V(ldi_b, I5_DF_b)     \
  V(ldi_h, I5_DF_h)     \
  V(ldi_w, I5_DF_w)     \
  V(ldi_d, I5_DF_d)

#define MSA_I10(name, format)                           \
  void Assembler::name(MSARegister wd, int32_t imm10) { \
    GenInstrMsaI10(LDI, format, imm10, wd);             \
  }
MSA_I10_LIST(MSA_I10)
#undef MSA_I10
#undef MSA_I10_LIST

#define MSA_I5_LIST(V) \
  V(addvi, ADDVI)      \
  V(subvi, SUBVI)      \
  V(maxi_s, MAXI_S)    \
  V(maxi_u, MAXI_U)    \
  V(mini_s, MINI_S)    \
  V(mini_u, MINI_U)    \
  V(ceqi, CEQI)        \
  V(clti_s, CLTI_S)    \
  V(clti_u, CLTI_U)    \
  V(clei_s, CLEI_S)    \
  V(clei_u, CLEI_U)

#define MSA_I5_FORMAT(name, opcode, format)                       \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t imm5) {                \
    GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd);          \
  }

#define MSA_I5(name, opcode)     \
  MSA_I5_FORMAT(name, opcode, b) \
  MSA_I5_FORMAT(name, opcode, h) \
  MSA_I5_FORMAT(name, opcode, w) \
  MSA_I5_FORMAT(name, opcode, d)

MSA_I5_LIST(MSA_I5)
#undef MSA_I5
#undef MSA_I5_FORMAT
#undef MSA_I5_LIST
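
// For reference, each MSA_I5 entry fans out over the four data formats via
// MSA_I5_FORMAT; e.g. (addvi, ADDVI) defines addvi_b/addvi_h/addvi_w/addvi_d,
// where addvi_b is:
//
//   void Assembler::addvi_b(MSARegister wd, MSARegister ws, uint32_t imm5) {
//     GenInstrMsaI5(ADDVI, I5_DF_b, imm5, ws, wd);
//   }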

#define MSA_I8_LIST(V) \
  V(andi_b, ANDI_B)    \
  V(ori_b, ORI_B)      \
  V(nori_b, NORI_B)    \
  V(xori_b, XORI_B)    \
  V(bmnzi_b, BMNZI_B)  \
  V(bmzi_b, BMZI_B)    \
  V(bseli_b, BSELI_B)  \
  V(shf_b, SHF_B)      \
  V(shf_h, SHF_H)      \
  V(shf_w, SHF_W)

#define MSA_I8(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
    GenInstrMsaI8(opcode, imm8, ws, wd);                                \
  }

MSA_I8_LIST(MSA_I8)
#undef MSA_I8
#undef MSA_I8_LIST

#define MSA_VEC_LIST(V) \
  V(and_v, AND_V)       \
  V(or_v, OR_V)         \
  V(nor_v, NOR_V)       \
  V(xor_v, XOR_V)       \
  V(bmnz_v, BMNZ_V)     \
  V(bmz_v, BMZ_V)       \
  V(bsel_v, BSEL_V)

#define MSA_VEC(name, opcode)                                            \
  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
    GenInstrMsaVec(opcode, wt, ws, wd);                                  \
  }

MSA_VEC_LIST(MSA_VEC)
#undef MSA_VEC
#undef MSA_VEC_LIST

#define MSA_2R_LIST(V) \
  V(pcnt, PCNT)        \
  V(nloc, NLOC)        \
  V(nlzc, NLZC)

#define MSA_2R_FORMAT(name, opcode, format)                         \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd);              \
  }

#define MSA_2R(name, opcode)     \
  MSA_2R_FORMAT(name, opcode, b) \
  MSA_2R_FORMAT(name, opcode, h) \
  MSA_2R_FORMAT(name, opcode, w) \
  MSA_2R_FORMAT(name, opcode, d)

MSA_2R_LIST(MSA_2R)
#undef MSA_2R
#undef MSA_2R_FORMAT
#undef MSA_2R_LIST

#define MSA_FILL(format)                                              \
  void Assembler::fill_##format(MSARegister wd, Register rs) {        \
    DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));     \
    DCHECK(rs.is_valid() && wd.is_valid());                           \
    Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format |   \
                  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
                  MSA_VEC_2R_2RF_MINOR;                               \
    emit(instr);                                                      \
  }

MSA_FILL(b)
MSA_FILL(h)
MSA_FILL(w)
#undef MSA_FILL
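
// Note that there is no fill_d variant here: FILL.D needs a 64-bit GPR
// source, which MIPS32 does not provide.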

#define MSA_2RF_LIST(V) \
  V(fclass, FCLASS)     \
  V(ftrunc_s, FTRUNC_S) \
  V(ftrunc_u, FTRUNC_U) \
  V(fsqrt, FSQRT)       \
  V(frsqrt, FRSQRT)     \
  V(frcp, FRCP)         \
  V(frint, FRINT)       \
  V(flog2, FLOG2)       \
  V(fexupl, FEXUPL)     \
  V(fexupr, FEXUPR)     \
  V(ffql, FFQL)         \
  V(ffqr, FFQR)         \
  V(ftint_s, FTINT_S)   \
  V(ftint_u, FTINT_U)   \
  V(ffint_s, FFINT_S)   \
  V(ffint_u, FFINT_U)

#define MSA_2RF_FORMAT(name, opcode, format)                        \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
    GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd);            \
  }

#define MSA_2RF(name, opcode)     \
  MSA_2RF_FORMAT(name, opcode, w) \
  MSA_2RF_FORMAT(name, opcode, d)

MSA_2RF_LIST(MSA_2RF)
#undef MSA_2RF
#undef MSA_2RF_FORMAT
#undef MSA_2RF_LIST

#define MSA_3R_LIST(V)  \
  V(sll, SLL_MSA)       \
  V(sra, SRA_MSA)       \
  V(srl, SRL_MSA)       \
  V(bclr, BCLR)         \
  V(bset, BSET)         \
  V(bneg, BNEG)         \
  V(binsl, BINSL)       \
  V(binsr, BINSR)       \
  V(addv, ADDV)         \
  V(subv, SUBV)         \
  V(max_s, MAX_S)       \
  V(max_u, MAX_U)       \
  V(min_s, MIN_S)       \
  V(min_u, MIN_U)       \
  V(max_a, MAX_A)       \
  V(min_a, MIN_A)       \
  V(ceq, CEQ)           \
  V(clt_s, CLT_S)       \
  V(clt_u, CLT_U)       \
  V(cle_s, CLE_S)       \
  V(cle_u, CLE_U)       \
  V(add_a, ADD_A)       \
  V(adds_a, ADDS_A)     \
  V(adds_s, ADDS_S)     \
  V(adds_u, ADDS_U)     \
  V(ave_s, AVE_S)       \
  V(ave_u, AVE_U)       \
  V(aver_s, AVER_S)     \
  V(aver_u, AVER_U)     \
  V(subs_s, SUBS_S)     \
  V(subs_u, SUBS_U)     \
  V(subsus_u, SUBSUS_U) \
  V(subsuu_s, SUBSUU_S) \
  V(asub_s, ASUB_S)     \
  V(asub_u, ASUB_U)     \
  V(mulv, MULV)         \
  V(maddv, MADDV)       \
  V(msubv, MSUBV)       \
  V(div_s, DIV_S_MSA)   \
  V(div_u, DIV_U)       \
  V(mod_s, MOD_S)       \
  V(mod_u, MOD_U)       \
  V(dotp_s, DOTP_S)     \
  V(dotp_u, DOTP_U)     \
  V(dpadd_s, DPADD_S)   \
  V(dpadd_u, DPADD_U)   \
  V(dpsub_s, DPSUB_S)   \
  V(dpsub_u, DPSUB_U)   \
  V(pckev, PCKEV)       \
  V(pckod, PCKOD)       \
  V(ilvl, ILVL)         \
  V(ilvr, ILVR)         \
  V(ilvev, ILVEV)       \
  V(ilvod, ILVOD)       \
  V(vshf, VSHF)         \
  V(srar, SRAR)         \
  V(srlr, SRLR)         \
  V(hadd_s, HADD_S)     \
  V(hadd_u, HADD_U)     \
  V(hsub_s, HSUB_S)     \
  V(hsub_u, HSUB_U)

#define MSA_3R_FORMAT(name, opcode, format)                             \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,       \
                                  MSARegister wt) {                     \
    GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
  }

#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format)                \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws,    \
                                  Register rt) {                     \
    GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
  }

#define MSA_3R(name, opcode)     \
  MSA_3R_FORMAT(name, opcode, b) \
  MSA_3R_FORMAT(name, opcode, h) \
  MSA_3R_FORMAT(name, opcode, w) \
  MSA_3R_FORMAT(name, opcode, d)

#define MSA_3R_SLD_SPLAT(name, opcode)     \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)

MSA_3R_LIST(MSA_3R)
MSA_3R_SLD_SPLAT(sld, SLD)
MSA_3R_SLD_SPLAT(splat, SPLAT)

#undef MSA_3R
#undef MSA_3R_FORMAT
#undef MSA_3R_FORMAT_SLD_SPLAT
#undef MSA_3R_SLD_SPLAT
#undef MSA_3R_LIST
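
// For reference, MSA_3R wrappers take a third MSA register, while the sld and
// splat variants take a GPR in that position instead; e.g. the generated
// signatures look like:
//
//   void Assembler::addv_b(MSARegister wd, MSARegister ws, MSARegister wt);
//   void Assembler::splat_b(MSARegister wd, MSARegister ws, Register rt);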

#define MSA_3RF_LIST1(V) \
  V(fcaf, FCAF)          \
  V(fcun, FCUN)          \
  V(fceq, FCEQ)          \
  V(fcueq, FCUEQ)        \
  V(fclt, FCLT)          \
  V(fcult, FCULT)        \
  V(fcle, FCLE)          \
  V(fcule, FCULE)        \
  V(fsaf, FSAF)          \
  V(fsun, FSUN)          \
  V(fseq, FSEQ)          \
  V(fsueq, FSUEQ)        \
  V(fslt, FSLT)          \
  V(fsult, FSULT)        \
  V(fsle, FSLE)          \
  V(fsule, FSULE)        \
  V(fadd, FADD)          \
  V(fsub, FSUB)          \
  V(fmul, FMUL)          \
  V(fdiv, FDIV)          \
  V(fmadd, FMADD)        \
  V(fmsub, FMSUB)        \
  V(fexp2, FEXP2)        \
  V(fmin, FMIN)          \
  V(fmin_a, FMIN_A)      \
  V(fmax, FMAX)          \
  V(fmax_a, FMAX_A)      \
  V(fcor, FCOR)          \
  V(fcune, FCUNE)        \
  V(fcne, FCNE)          \
  V(fsor, FSOR)          \
  V(fsune, FSUNE)        \
  V(fsne, FSNE)

#define MSA_3RF_LIST2(V) \
  V(fexdo, FEXDO)        \
  V(ftq, FTQ)            \
  V(mul_q, MUL_Q)        \
  V(madd_q, MADD_Q)      \
  V(msub_q, MSUB_Q)      \
  V(mulr_q, MULR_Q)      \
  V(maddr_q, MADDR_Q)    \
  V(msubr_q, MSUBR_Q)

#define MSA_3RF_FORMAT(name, opcode, df, df_c)                \
  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
                              MSARegister wt) {               \
    GenInstrMsa3RF(opcode, df_c, wt, ws, wd);                 \
  }

#define MSA_3RF_1(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, w, 0) \
  MSA_3RF_FORMAT(name, opcode, d, 1)

#define MSA_3RF_2(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, h, 0) \
  MSA_3RF_FORMAT(name, opcode, w, 1)

MSA_3RF_LIST1(MSA_3RF_1)
MSA_3RF_LIST2(MSA_3RF_2)
#undef MSA_3RF_1
#undef MSA_3RF_2
#undef MSA_3RF_FORMAT
#undef MSA_3RF_LIST1
#undef MSA_3RF_LIST2
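
// For reference, the 3RF wrappers encode the data format as a single bit
// (df_c): list 1 opcodes operate on w/d vectors, list 2 opcodes on h/w, so
// e.g. (fadd, FADD) defines fadd_w with df_c 0 and fadd_d with df_c 1, while
// (fexdo, FEXDO) defines fexdo_h with df_c 0 and fexdo_w with df_c 1.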

void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
}

void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
}

void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
}

void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
}

void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
}

void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
}

void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
}

void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
}

void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
}

void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
}

void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
}

void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
}

void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
}

void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
}

void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
}

void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
}

void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
}

void Assembler::move_v(MSARegister wd, MSARegister ws) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(cd.is_valid() && rs.is_valid());
  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
                (cd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rd.is_valid() && cs.is_valid());
  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
                (rd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

#define MSA_BIT_LIST(V) \
  V(slli, SLLI)         \
  V(srai, SRAI)         \
  V(srli, SRLI)         \
  V(bclri, BCLRI)       \
  V(bseti, BSETI)       \
  V(bnegi, BNEGI)       \
  V(binsli, BINSLI)     \
  V(binsri, BINSRI)     \
  V(sat_s, SAT_S)       \
  V(sat_u, SAT_U)       \
  V(srari, SRARI)       \
  V(srlri, SRLRI)

#define MSA_BIT_FORMAT(name, opcode, format)                      \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t m) {                   \
    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);           \
  }

#define MSA_BIT(name, opcode)     \
  MSA_BIT_FORMAT(name, opcode, b) \
  MSA_BIT_FORMAT(name, opcode, h) \
  MSA_BIT_FORMAT(name, opcode, w) \
  MSA_BIT_FORMAT(name, opcode, d)

MSA_BIT_LIST(MSA_BIT)
#undef MSA_BIT
#undef MSA_BIT_FORMAT
#undef MSA_BIT_LIST

int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                         intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  if (RelocInfo::IsInternalReference(rmode)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc);
    if (*p == 0) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
    if (IsLui(instr)) {
      Instr instr1 = instr_at(pc + 0 * kInstrSize);
      Instr instr2 = instr_at(pc + 1 * kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
        imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
      }

      if (imm == kEndOfJumpChain) {
        return 0;  // Number of instructions patched.
      }
      imm += pc_delta;
      DCHECK_EQ(imm & 3, 0);
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
        instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
      } else {
        instr_at_put(pc + 0 * kInstrSize,
                     instr1 | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
      }
      return 2;  // Number of instructions patched.
    } else {
      UNREACHABLE();
    }
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute the new buffer size: double up to 1 MB, then grow by 1 MB at a
  // time.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }

  // Some internal data structures overflow for very large buffers, so
  // kMaximalBufferSize must be kept small enough to avoid that.
  if (desc.buffer_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up the new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
    }
  }
  DCHECK(!overflow());
}


void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dd(Label* label) {
  uint32_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    if (options().disable_reloc_info_for_patching) return;
    if (RelocInfo::IsOnlyForSerializer(rmode) &&
        !options().record_reloc_info_for_serialization && !emit_debug_code()) {
      return;
    }
    DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
    reloc_info_writer.Write(&rinfo);
  }
}

void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      if (IsMipsArchVariant(kMips32r6)) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        if (IsMipsArchVariant(kMips32r6)) {
          bc(&after_pool);
          nop();
        } else {
          or_(t8, ra, zero_reg);
          nal();       // Read PC into ra register.
          lui(t9, 0);  // Branch delay slot.
          ori(t9, t9, 0);
          addu(t9, ra, t9);
          // The jr instruction will execute the or_ from the next trampoline
          // slot in its branch delay slot. This is the expected behavior in
          // order to decrease the size of the trampoline pool.
          or_(ra, t8, zero_reg);
          jr(t9);
        }
      }
      nop();
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels at this point is zero, so we
    // can move the next buffer check out to the maximum branch distance.
    next_buffer_check_ =
        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}


Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret 2 instructions generated by li (lui/ori) or optimized pairs
  // lui/jic, aui/jic or lui/jialc.
  if (IsLui(instr1)) {
    if (IsOri(instr2)) {
      // Assemble the 32 bit value.
      return static_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
                                  GetImmediate16(instr2));
    } else if (IsJicOrJialc(instr2)) {
      // Assemble the 32 bit value.
      return static_cast<Address>(CreateTargetAddress(instr1, instr2));
    }
  }

  // We should never get here; force a bad address if we do.
  UNREACHABLE();
}


// MIPS and ia32 use opposite encodings for qNaN and sNaN, such that an ia32
// qNaN is a MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a
// heap snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}


// On MIPS, a target address is stored in a lui/ori instruction pair, each
// of which loads 16 bits of the 32-bit address into a register.
// Patching the address must replace both instructions and flush the i-cache.
// On r6, the target address is stored in a lui/jic pair, and both
// instructions have to be patched as well.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
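//
// For example (target value and register chosen for illustration only),
// patching 0x12345678 into a lui/ori pair with rt == t9 produces:
//   lui t9, 0x1234       // upper 16 bits
//   ori t9, t9, 0x5678   // lower 16 bits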
void Assembler::set_target_value_at(Address pc, uint32_t target,
                                    ICacheFlushMode icache_flush_mode) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);

#ifdef DEBUG
  // Check that we have the result from a li macro-instruction, using an
  // instruction pair.
  Instr instr1 = instr_at(pc);
  DCHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
#endif

  if (IsJicOrJialc(instr2)) {
    // Must use 2 instructions to ensure patchable code => use lui and jic.
    uint32_t lui_offset, jic_offset;
    Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);

    *p &= ~kImm16Mask;
    *(p + 1) &= ~kImm16Mask;

    *p |= lui_offset;
    *(p + 1) |= jic_offset;

  } else {
    // Must use 2 instructions to ensure patchable code => just use lui and
    // ori:
    //   lui rt, upper-16.
    //   ori rt, rt, lower-16.
    *p = LUI | rt_code | ((target & kHiMask) >> kLuiShift);
    *(p + 1) = ORI | rt_code | (rt_code << 5) | (target & kImm16Mask);
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(pc, 2 * sizeof(int32_t));
  }
}

UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}

UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;
}

Register UseScratchRegisterScope::Acquire() {
  DCHECK_NOT_NULL(available_);
  DCHECK_NE(*available_, 0);
  int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
  *available_ &= ~(1UL << index);

  return Register::from_code(index);
}

bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS