1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 // The original source code covered by the above license above has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34
35 #include "src/mips/assembler-mips.h"
36
37 #if V8_TARGET_ARCH_MIPS
38
39 #include "src/base/bits.h"
40 #include "src/base/cpu.h"
41 #include "src/mips/assembler-mips-inl.h"
42
43 namespace v8 {
44 namespace internal {
45
46 // Get the CPU features enabled by the build. For cross compilation the
47 // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
48 // can be defined to enable FPU instructions when building the
49 // snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned features = 0;
  // The FPU bit is implied either by an explicit request
  // (CAN_USE_FPU_INSTRUCTIONS, used when building the snapshot) or because
  // the compiler itself targets hard-float MIPS; in the latter case we can
  // use the FPU in generated code too. This won't work for cross compilation.
#if defined(CAN_USE_FPU_INSTRUCTIONS) || \
    (defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0)
  features |= 1u << FPU;
#endif
  return features;
}
65
66
// Determine the set of supported CPU features, combining build-time
// implications with (for native builds) runtime probing via base::CPU.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  // In FPXX builds, 64-bit FPU mode depends on how the kernel runs the FPU,
  // so it must be detected at runtime.
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  // Architecture revision reported by the CPU: 6 => r6 only,
  // 2 => r2 (which implies r1), anything else => plain r1.
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}
106
107
// Platform hook: no target information to print for MIPS.
void CpuFeatures::PrintTarget() { }
// Platform hook: no feature information to print for MIPS.
void CpuFeatures::PrintFeatures() { }
110
111
ToNumber(Register reg)112 int ToNumber(Register reg) {
113 DCHECK(reg.is_valid());
114 const int kNumbers[] = {
115 0, // zero_reg
116 1, // at
117 2, // v0
118 3, // v1
119 4, // a0
120 5, // a1
121 6, // a2
122 7, // a3
123 8, // t0
124 9, // t1
125 10, // t2
126 11, // t3
127 12, // t4
128 13, // t5
129 14, // t6
130 15, // t7
131 16, // s0
132 17, // s1
133 18, // s2
134 19, // s3
135 20, // s4
136 21, // s5
137 22, // s6
138 23, // s7
139 24, // t8
140 25, // t9
141 26, // k0
142 27, // k1
143 28, // gp
144 29, // sp
145 30, // fp
146 31, // ra
147 };
148 return kNumbers[reg.code()];
149 }
150
151
// Map a numeric register code (0..31, standard MIPS o32 numbering) back to
// the corresponding Register value.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
170
171
172 // -----------------------------------------------------------------------------
173 // Implementation of RelocInfo.
174
// Relocation modes whose targets must be re-resolved when code moves:
// code targets plus plain and encoded internal references.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
178
179
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}
186
187
bool RelocInfo::IsInConstantPool() {
  // MIPS does not use an embedded constant pool.
  return false;
}
191
// Address of the wasm memory referenced by this relocation entry.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
196
// Address of the wasm global referenced by this relocation entry.
Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
201
// Wasm memory size constant embedded at this relocation entry (stored in
// the same encoding as a target address).
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
206
// Wasm function-table size constant embedded at this relocation entry.
uint32_t RelocInfo::wasm_function_table_size_reference() {
  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
211
// Patch the relocated memory address in place; "unchecked" because the
// rmode_ is not validated here.
void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
216
// Patch an embedded wasm size constant in place (encoded like an address);
// "unchecked" because the rmode_ is not validated here.
void RelocInfo::unchecked_update_wasm_size(uint32_t size,
                                           ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}
222
223 // -----------------------------------------------------------------------------
224 // Implementation of Operand and MemOperand.
225 // See assembler-mips-inl.h for inlined constructors.
226
// Build an immediate operand from a handle.  Heap objects are referenced
// through their handle location with EMBEDDED_OBJECT relocation; non-heap
// values (smis) are used directly as raw immediates.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}
241
242
// Memory operand: base register |rm| plus a fixed 16-bit-encodable offset.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
246
247
// Memory operand expressed as unit * multiplier (+ optional addend), e.g.
// for indexing fixed-size slots relative to |rm|.
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
252
253
254 // -----------------------------------------------------------------------------
255 // Specific instructions, constants, and masks.
256
// Sign bit of a 16-bit immediate offset field.
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, 0)): frame-pointer-relative load.
const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, 0)): frame-pointer-relative store.
const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// Same as above, but with the offset sign bit set (negative offsets).
const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
290
// Construct an assembler over |buffer| of |buffer_size| bytes.  Instructions
// grow forward from the start of the buffer while relocation info is written
// backwards from its end.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  // No trampoline pool emitted yet, and none blocked.
  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // With forced long branches every branch is emitted long, which is
  // equivalent to the trampoline already having been emitted.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
312
313
// Finalize assembly and fill in |desc| describing the generated code:
// instruction bytes at the front of the buffer, relocation info at the back.
void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
  // MIPS emits no constant pool and no unwinding info.
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
327
328
// Pad with nops until pc_offset() is a multiple of m (a power of two >= 4).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  // NOTE(review): presumably fills a pending branch forbidden slot before
  // padding is emitted (as in GetCode) — confirm against the inl header.
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
336
337
void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
343
344
GetRtReg(Instr instr)345 Register Assembler::GetRtReg(Instr instr) {
346 Register rt;
347 rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
348 return rt;
349 }
350
351
GetRsReg(Instr instr)352 Register Assembler::GetRsReg(Instr instr) {
353 Register rs;
354 rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
355 return rs;
356 }
357
358
GetRdReg(Instr instr)359 Register Assembler::GetRdReg(Instr instr) {
360 Register rd;
361 rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
362 return rd;
363 }
364
365
GetRt(Instr instr)366 uint32_t Assembler::GetRt(Instr instr) {
367 return (instr & kRtFieldMask) >> kRtShift;
368 }
369
370
GetRtField(Instr instr)371 uint32_t Assembler::GetRtField(Instr instr) {
372 return instr & kRtFieldMask;
373 }
374
375
GetRs(Instr instr)376 uint32_t Assembler::GetRs(Instr instr) {
377 return (instr & kRsFieldMask) >> kRsShift;
378 }
379
380
GetRsField(Instr instr)381 uint32_t Assembler::GetRsField(Instr instr) {
382 return instr & kRsFieldMask;
383 }
384
385
GetRd(Instr instr)386 uint32_t Assembler::GetRd(Instr instr) {
387 return (instr & kRdFieldMask) >> kRdShift;
388 }
389
390
GetRdField(Instr instr)391 uint32_t Assembler::GetRdField(Instr instr) {
392 return instr & kRdFieldMask;
393 }
394
395
GetSa(Instr instr)396 uint32_t Assembler::GetSa(Instr instr) {
397 return (instr & kSaFieldMask) >> kSaShift;
398 }
399
400
GetSaField(Instr instr)401 uint32_t Assembler::GetSaField(Instr instr) {
402 return instr & kSaFieldMask;
403 }
404
405
GetOpcodeField(Instr instr)406 uint32_t Assembler::GetOpcodeField(Instr instr) {
407 return instr & kOpcodeMask;
408 }
409
410
GetFunction(Instr instr)411 uint32_t Assembler::GetFunction(Instr instr) {
412 return (instr & kFunctionFieldMask) >> kFunctionShift;
413 }
414
415
GetFunctionField(Instr instr)416 uint32_t Assembler::GetFunctionField(Instr instr) {
417 return instr & kFunctionFieldMask;
418 }
419
420
GetImmediate16(Instr instr)421 uint32_t Assembler::GetImmediate16(Instr instr) {
422 return instr & kImm16Mask;
423 }
424
425
GetLabelConst(Instr instr)426 uint32_t Assembler::GetLabelConst(Instr instr) {
427 return instr & ~kImm16Mask;
428 }
429
430
IsPop(Instr instr)431 bool Assembler::IsPop(Instr instr) {
432 return (instr & ~kRtMask) == kPopRegPattern;
433 }
434
435
IsPush(Instr instr)436 bool Assembler::IsPush(Instr instr) {
437 return (instr & ~kRtMask) == kPushRegPattern;
438 }
439
440
IsSwRegFpOffset(Instr instr)441 bool Assembler::IsSwRegFpOffset(Instr instr) {
442 return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
443 }
444
445
IsLwRegFpOffset(Instr instr)446 bool Assembler::IsLwRegFpOffset(Instr instr) {
447 return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
448 }
449
450
IsSwRegFpNegOffset(Instr instr)451 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
452 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
453 kSwRegFpNegOffsetPattern);
454 }
455
456
IsLwRegFpNegOffset(Instr instr)457 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
458 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
459 kLwRegFpNegOffsetPattern);
460 }
461
462
463 // Labels refer to positions in the (to be) generated code.
464 // There are bound, linked, and unused labels.
465 //
466 // Bound labels refer to known positions in the already
467 // generated code. pos() is the position the label refers to.
468 //
469 // Linked labels refer to unknown positions in the code
470 // to be generated; pos() is the position of the last
471 // instruction using the label.
472
// The link chain is terminated by a value of -1 stored in the instruction,
// which is an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in the
// code it is converted to an 18-bit value addressing bytes, hence the -4
// sentinel value below.
477
// Terminator for label link chains: -4 is the byte form of the illegal
// word offset -1 (see the comment above).
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
481
482
// Returns true if |instr| is any conditional/compact branch instruction
// (jumps are classified separately by IsJump).
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
506
507
IsBc(Instr instr)508 bool Assembler::IsBc(Instr instr) {
509 uint32_t opcode = GetOpcodeField(instr);
510 // Checks if the instruction is a BC or BALC.
511 return opcode == BC || opcode == BALC;
512 }
513
514
IsBzc(Instr instr)515 bool Assembler::IsBzc(Instr instr) {
516 uint32_t opcode = GetOpcodeField(instr);
517 // Checks if the instruction is BEQZC or BNEZC.
518 return (opcode == POP66 && GetRsField(instr) != 0) ||
519 (opcode == POP76 && GetRsField(instr) != 0);
520 }
521
522
IsEmittedConstant(Instr instr)523 bool Assembler::IsEmittedConstant(Instr instr) {
524 uint32_t label_constant = GetLabelConst(instr);
525 return label_constant == 0; // Emitted label const in reg-exp engine.
526 }
527
528
IsBeq(Instr instr)529 bool Assembler::IsBeq(Instr instr) {
530 return GetOpcodeField(instr) == BEQ;
531 }
532
533
IsBne(Instr instr)534 bool Assembler::IsBne(Instr instr) {
535 return GetOpcodeField(instr) == BNE;
536 }
537
538
IsBeqzc(Instr instr)539 bool Assembler::IsBeqzc(Instr instr) {
540 uint32_t opcode = GetOpcodeField(instr);
541 return opcode == POP66 && GetRsField(instr) != 0;
542 }
543
544
IsBnezc(Instr instr)545 bool Assembler::IsBnezc(Instr instr) {
546 uint32_t opcode = GetOpcodeField(instr);
547 return opcode == POP76 && GetRsField(instr) != 0;
548 }
549
550
IsBeqc(Instr instr)551 bool Assembler::IsBeqc(Instr instr) {
552 uint32_t opcode = GetOpcodeField(instr);
553 uint32_t rs = GetRsField(instr);
554 uint32_t rt = GetRtField(instr);
555 return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
556 }
557
558
IsBnec(Instr instr)559 bool Assembler::IsBnec(Instr instr) {
560 uint32_t opcode = GetOpcodeField(instr);
561 uint32_t rs = GetRsField(instr);
562 uint32_t rt = GetRtField(instr);
563 return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
564 }
565
IsJicOrJialc(Instr instr)566 bool Assembler::IsJicOrJialc(Instr instr) {
567 uint32_t opcode = GetOpcodeField(instr);
568 uint32_t rs = GetRsField(instr);
569 return (opcode == POP66 || opcode == POP76) && rs == 0;
570 }
571
// Returns true for unconditional jumps: J, JAL, and the register forms
// JR / JALR (SPECIAL opcode with rt == 0; JR additionally requires rd == 0).
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}
582
IsJ(Instr instr)583 bool Assembler::IsJ(Instr instr) {
584 uint32_t opcode = GetOpcodeField(instr);
585 // Checks if the instruction is a jump.
586 return opcode == J;
587 }
588
589
IsJal(Instr instr)590 bool Assembler::IsJal(Instr instr) {
591 return GetOpcodeField(instr) == JAL;
592 }
593
594
IsJr(Instr instr)595 bool Assembler::IsJr(Instr instr) {
596 if (!IsMipsArchVariant(kMips32r6)) {
597 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
598 } else {
599 return GetOpcodeField(instr) == SPECIAL &&
600 GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
601 }
602 }
603
604
IsJalr(Instr instr)605 bool Assembler::IsJalr(Instr instr) {
606 return GetOpcodeField(instr) == SPECIAL &&
607 GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
608 }
609
610
IsLui(Instr instr)611 bool Assembler::IsLui(Instr instr) {
612 uint32_t opcode = GetOpcodeField(instr);
613 // Checks if the instruction is a load upper immediate.
614 return opcode == LUI;
615 }
616
617
bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an ORI (or-immediate).
  return opcode == ORI;
}
623
624
// Returns true if |instr| is a nop of the given marker |type| (0..31);
// type 0 is the canonical nop, non-zero types are marker nops.
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
647
648
// Byte offset encoded in branch |instr|: sign-extend the 16-bit word
// offset and scale words to bytes.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
653
654
IsLw(Instr instr)655 bool Assembler::IsLw(Instr instr) {
656 return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
657 }
658
659
GetLwOffset(Instr instr)660 int16_t Assembler::GetLwOffset(Instr instr) {
661 DCHECK(IsLw(instr));
662 return ((instr & kImm16Mask));
663 }
664
665
SetLwOffset(Instr instr,int16_t offset)666 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
667 DCHECK(IsLw(instr));
668
669 // We actually create a new lw instruction based on the original one.
670 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
671 | (offset & kImm16Mask);
672
673 return temp_instr;
674 }
675
676
IsSw(Instr instr)677 bool Assembler::IsSw(Instr instr) {
678 return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
679 }
680
681
SetSwOffset(Instr instr,int16_t offset)682 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
683 DCHECK(IsSw(instr));
684 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
685 }
686
687
IsAddImmediate(Instr instr)688 bool Assembler::IsAddImmediate(Instr instr) {
689 return ((instr & kOpcodeMask) == ADDIU);
690 }
691
692
SetAddImmediateOffset(Instr instr,int16_t offset)693 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
694 DCHECK(IsAddImmediate(instr));
695 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
696 }
697
698
IsAndImmediate(Instr instr)699 bool Assembler::IsAndImmediate(Instr instr) {
700 return GetOpcodeField(instr) == ANDI;
701 }
702
703
// Width of the branch-offset field of |instr|: r6 compact branches use 26
// bits (BC/BALC) or 21 bits (BEQZC/BNEZC); everything else uses 16 bits.
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (IsMipsArchVariant(kMips32r6)) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}
714
715
// Decode the branch offset of |instr| at position |pos| and return the
// branch target position, or the kEndOfChain sentinel.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
732
// Reconstruct the 32-bit target address encoded in a lui/{jic,jialc} pair.
// jic sign-extends its 16-bit offset at run time, so when the low half is
// negative the lui immediate was biased down by 0xffff when packing (see
// UnpackTargetAddress); adding kImm16Mask here undoes that bias.
uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
  int16_t jic_offset = GetImmediate16(instr_jic);
  int16_t lui_offset = GetImmediate16(instr_lui);

  if (jic_offset < 0) {
    lui_offset += kImm16Mask;
  }
  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;

  return lui_offset_u | jic_offset_u;
}
746
747 // Use just lui and jic instructions. Insert lower part of the target address in
748 // jic offset part. Since jic sign-extends offset and then add it with register,
749 // before that addition, difference between upper part of the target address and
750 // upper part of the sign-extended offset (0xffff or 0x0000), will be inserted
751 // in jic register with lui instruction.
// Split |address| into lui/jic immediates (see comment above).  Because jic
// sign-extends its offset before the addition, the lui half must be biased
// down when the low half is negative so the run-time sum equals |address|.
void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
                                    int16_t& jic_offset) {
  lui_offset = (address & kHiMask) >> kLuiShift;
  jic_offset = address & kLoMask;

  if (jic_offset < 0) {
    lui_offset -= kImm16Mask;
  }
}
761
// Like UnpackTargetAddress, but delivers the two 16-bit halves zero-extended
// into uint32_t values ready to be OR'ed into instruction immediates.
void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
                                            uint32_t& lui_offset,
                                            uint32_t& jic_offset) {
  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
  int16_t jic_offset16 = address & kLoMask;

  // Compensate for jic sign-extending its offset at run time.
  if (jic_offset16 < 0) {
    lui_offset16 -= kImm16Mask;
  }
  lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
  jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
774
// Return the position linked to by the instruction at |pos| (the next entry
// in a label's link chain), or kEndOfChain when the chain ends here.
// |is_internal| selects the internal-reference encoding (absolute address
// word) over the branch / lui+ori / lui+jic encodings.
int Assembler::target_at(int pos, bool is_internal) {
  Instr instr = instr_at(pos);
  if (is_internal) {
    if (instr == 0) {
      return kEndOfChain;
    } else {
      // The word holds an absolute address; convert it back to a position.
      int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int delta = static_cast<int>(instr_address - instr);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      // Sign-extend the 16-bit word offset and scale it to bytes (net << 2).
      int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else {
    // lui/ori or lui/{jic,jialc} pair holding an absolute address.
    Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
    int32_t imm;
    if (IsJicOrJialc(instr2)) {
      imm = CreateTargetAddress(instr1, instr2);
    } else {
      imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
    }

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  return 0;  // Not reached: both branches above return.
}
824
825
// Re-encode branch |instr| (located at |pos|) so that it targets
// |target_pos|, using whatever offset width the instruction supports.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;  // Branch offsets address 32-bit words.

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
839
840
// Overwrite the instruction(s) at |pos| to target |target_pos|.  Handles
// the four encodings: internal reference (absolute word), emitted label
// constant, branch, and lui/{ori,jic,jialc} pair.
void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                              bool is_internal) {
  Instr instr = instr_at(pos);

  if (is_internal) {
    // Store the absolute address of the target directly.
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    instr_at_put(pos, imm);
    return;
  }
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else {
    // Two-instruction sequence: patch the absolute address into both halves.
    Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);
    DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
    instr1 &= ~kImm16Mask;
    instr2 &= ~kImm16Mask;

    if (IsJicOrJialc(instr2)) {
      uint32_t lui_offset_u, jic_offset_u;
      UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
      instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
      instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
    } else {
      instr_at_put(pos + 0 * Assembler::kInstrSize,
                   instr1 | ((imm & kHiMask) >> kLuiShift));
      instr_at_put(pos + 1 * Assembler::kInstrSize,
                   instr2 | (imm & kImm16Mask));
    }
  }
}
885
886
// Debugging aid: print the state of label L, walking its link chain while
// it is still unbound.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    // Walk a copy of the label so L itself is left untouched.
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
909
910
// Bind label L to position |pos|: walk its link chain and retarget every
// instruction on it.  Branches that cannot reach |pos| directly are routed
// through a trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      // One fewer unbound label, so the trampoline check can be deferred.
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // The branch cannot reach |pos| directly; bounce through a
          // trampoline slot (allocated lazily, once per bind).
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
956
957
// Bind label L to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
962
963
// Advance L to the next entry of its link chain, or Unuse() it when the
// chain is exhausted.
void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
974
975
// Returns true when the bound label L is close enough to be reached by a
// standard branch, leaving a small safety margin of a few instructions.
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}


// Returns true when L is reachable with an offset field of |bits| width.
// Unbound (or null) labels are optimistically considered near.
bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  // Offset fields count instructions, hence the +2 scaling to bytes; keep a
  // margin of a few instructions for code emitted around the branch.
  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
}


// Dispatches the nearness test to the architecture-variant-specific helper.
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
}
992
993
BranchOffset(Instr instr)994 int Assembler::BranchOffset(Instr instr) {
995 // At pre-R6 and for other R6 branches the offset is 16 bits.
996 int bits = OffsetSize::kOffset16;
997
998 if (IsMipsArchVariant(kMips32r6)) {
999 uint32_t opcode = GetOpcodeField(instr);
1000 switch (opcode) {
1001 // Checks BC or BALC.
1002 case BC:
1003 case BALC:
1004 bits = OffsetSize::kOffset26;
1005 break;
1006
1007 // Checks BEQZC or BNEZC.
1008 case POP66:
1009 case POP76:
1010 if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
1011 break;
1012 default:
1013 break;
1014 }
1015 }
1016
1017 return (1 << (bits + 2 - 1)) - 1;
1018 }
1019
1020
1021 // We have to use a temporary register for things that can be relocated even
1022 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
1023 // space. There is no guarantee that the relocated location can be similarly
1024 // encoded.
// Returns true when the value carries relocation info, in which case it must
// be loaded via a register even if it would fit an immediate field (see the
// comment above).
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}
1028
// Emits a GPR R-type instruction: opcode | rs | rt | rd | sa | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


// Emits a bit-field R-type instruction: msb is encoded in the rd field and
// lsb in the sa field.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


// Emits an FPU three-operand instruction with an explicit format field.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


// Emits a four-FPU-register instruction; fr occupies the format field's slot.
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// Emits an FPU instruction that takes a GPR in the rt field.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// Emits a transfer between a GPR and an FPU control register.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1104
1105
1106 // Instructions with immediate value.
1107 // Registers are in the order of the instruction encoding, from left to right.
// Emits an I-type instruction with rs, rt and a 16-bit immediate; the
// immediate may be given as either a signed or unsigned 16-bit value.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// Emits an I-type instruction where the rt slot carries a secondary field
// (REGIMM-style encodings).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// Emits an I-type instruction with an FPU register in the ft field.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// Emits an instruction with rs and a signed 21-bit offset.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


// Emits an instruction with rs and an unsigned 21-bit offset.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


// Emits an instruction with a signed 26-bit offset (BC/BALC).
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
1159
1160
// Emits a J-type instruction with a 26-bit instruction-index target. The
// trampoline pool is blocked so nothing is emitted into the delay slot.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1169
1170
1171 // Returns the next free trampoline entry.
get_trampoline_entry(int32_t pos)1172 int32_t Assembler::get_trampoline_entry(int32_t pos) {
1173 int32_t trampoline_entry = kInvalidSlotPos;
1174
1175 if (!internal_trampoline_exception_) {
1176 if (trampoline_.start() > pos) {
1177 trampoline_entry = trampoline_.take_slot();
1178 }
1179
1180 if (kInvalidSlotPos == trampoline_entry) {
1181 internal_trampoline_exception_ = true;
1182 }
1183 }
1184 return trampoline_entry;
1185 }
1186
1187
jump_address(Label * L)1188 uint32_t Assembler::jump_address(Label* L) {
1189 int32_t target_pos;
1190
1191 if (L->is_bound()) {
1192 target_pos = L->pos();
1193 } else {
1194 if (L->is_linked()) {
1195 target_pos = L->pos(); // L's link.
1196 L->link_to(pc_offset());
1197 } else {
1198 L->link_to(pc_offset());
1199 return kEndOfJumpChain;
1200 }
1201 }
1202
1203 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
1204 DCHECK((imm & 3) == 0);
1205
1206 return imm;
1207 }
1208
1209
branch_offset_helper(Label * L,OffsetSize bits)1210 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
1211 int32_t target_pos;
1212 int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1213
1214 if (L->is_bound()) {
1215 target_pos = L->pos();
1216 } else {
1217 if (L->is_linked()) {
1218 target_pos = L->pos();
1219 L->link_to(pc_offset() + pad);
1220 } else {
1221 L->link_to(pc_offset() + pad);
1222 if (!trampoline_emitted_) {
1223 unbound_labels_count_++;
1224 next_buffer_check_ -= kTrampolineSlotsSize;
1225 }
1226 return kEndOfChain;
1227 }
1228 }
1229
1230 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1231 DCHECK(is_intn(offset, bits + 2));
1232 DCHECK((offset & 3) == 0);
1233
1234 return offset;
1235 }
1236
1237
// Stores label L's position into the instruction slot at |at_offset|. A
// bound label is stored as a code-object-relative value; an unbound label is
// spliced into L's link chain through that slot.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    // Store the position offset by the code object's header.
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      // Encode the word distance to the previous link in the low 16 bits.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      // First use of the label: a zero slot marks the end of the chain.
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        // Reserve trampoline headroom for this newly linked label.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1262
1263
1264 //------- Branch and jump instructions --------
1265
// Unconditional branch: encoded as beq with both comparands zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


// Unconditional branch-and-link: encoded as bgezal with rs = zero_reg.
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}
1274
1275
// Compact unconditional branch (R6 only): 26-bit offset, no delay slot.
void Assembler::bc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link (R6 only): 26-bit offset, no delay slot.
void Assembler::balc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}


// Branch on equal; the trampoline pool is blocked across the delay slot.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1293
1294
// Branch on rs >= 0 (REGIMM encoding), with delay slot.
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch on rt >= 0 (R6): BLEZL opcode with rs == rt selects BGEZC.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch on rs >= rt unsigned (R6): BLEZ opcode with two distinct
// nonzero registers selects BGEUC.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch on rs >= rt signed (R6): BLEZL opcode with two distinct
// nonzero registers selects BGEC.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Branch-and-link on rs >= 0. On R6 only the rs == zero_reg form (plain
// bal) remains valid, hence the DCHECK.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1333
1334
// Branch on rs > 0, with delay slot.
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch on rt > 0 (R6): BGTZL opcode with rs == 0 selects BGTZC.
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Branch on rs <= 0, with delay slot.
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch on rt <= 0 (R6): BLEZL opcode with rs == 0 selects BLEZC.
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1363
1364
// Compact branch on rt < 0 (R6): BGTZL opcode with rs == rt selects BLTZC.
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch on rs < rt unsigned (R6): BGTZ opcode with two distinct
// nonzero registers selects BLTUC.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch on rs < rt signed (R6): BGTZL opcode with two distinct
// nonzero registers selects BLTC.
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Branch on rs < 0 (REGIMM encoding), with delay slot.
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Branch-and-link on rs < 0. On R6 only rs == zero_reg (never taken) is
// still encodable, hence the DCHECK.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Branch on not equal, with delay slot.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1410
1411
// Compact branch on signed-add overflow (R6): ADDI opcode re-purposed as
// BOVC. The encoding requires rs >= rt, so operands are swapped as needed
// (the comparison is symmetric).
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// Compact branch on no signed-add overflow (R6): DADDI opcode re-purposed as
// BNVC, with the same rs >= rt normalization as bovc.
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1430
1431
// Compact branch-and-link on rt <= 0 (R6): BLEZ opcode with rs == 0.
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link on rt >= 0 (R6): BLEZ opcode with rs == rt.
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Branch-and-link likely on rs >= 0 (pre-R6 only), with delay slot.
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch-and-link on rt < 0 (R6): BGTZ opcode with rs == rt.
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link on rt > 0 (R6): BGTZ opcode with rs == 0.
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link on rt == 0 (R6): ADDI opcode with rs == 0.
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link on rt != 0 (R6): DADDI opcode with rs == 0.
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1485
1486
// Compact branch on equal (R6): ADDI opcode re-purposed as BEQC. The
// encoding requires rs < rt, so operands are ordered (equality is
// symmetric).
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// Compact branch on rs == 0 (R6): POP66 opcode with a 21-bit offset.
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch on not equal (R6): DADDI opcode re-purposed as BNEC, with
// the same rs < rt ordering as beqc.
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// Compact branch on rs != 0 (R6): POP76 opcode with a 21-bit offset.
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1521
1522
j(int32_t target)1523 void Assembler::j(int32_t target) {
1524 #if DEBUG
1525 // Get pc of delay slot.
1526 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1527 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1528 (kImm26Bits + kImmFieldShift)) == 0;
1529 DCHECK(in_range && ((target & 3) == 0));
1530 #endif
1531 BlockTrampolinePoolScope block_trampoline_pool(this);
1532 GenInstrJump(J, (target >> 2) & kImm26Mask);
1533 BlockTrampolinePoolFor(1); // For associated delay slot.
1534 }
1535
1536
jr(Register rs)1537 void Assembler::jr(Register rs) {
1538 if (!IsMipsArchVariant(kMips32r6)) {
1539 BlockTrampolinePoolScope block_trampoline_pool(this);
1540 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1541 BlockTrampolinePoolFor(1); // For associated delay slot.
1542 } else {
1543 jalr(rs, zero_reg);
1544 }
1545 }
1546
1547
// Jump-and-link to an absolute, instruction-aligned target within the
// current 256 MB region.
void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  // Target must share the delay-slot pc's upper bits (same 256 MB region)
  // and be word-aligned.
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(JAL, (target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1560
1561
// Jump to the address in rs, storing the return address in rd. rs and rd
// must differ (rs would be clobbered before the jump otherwise).
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1568
1569
// Jump indexed compact (R6): jump to rt + offset, no delay slot. Shares the
// POP66 opcode with beqzc; rs == 0 selects JIC.
void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}


// Jump indexed and link compact (R6): as jic but linking. Shares the POP76
// opcode with bnezc; rs == 0 selects JIALC.
void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}
1580
1581
1582 // -------Data-processing-instructions---------
1583
1584 // Arithmetic.
1585
// Add without overflow trap: rd = rs + rt.
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


// Add 16-bit sign-extended immediate without overflow trap. Note the
// destination is encoded in the instruction's rt field.
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


// Subtract without overflow trap: rd = rs - rt.
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
1599
1600
mul(Register rd,Register rs,Register rt)1601 void Assembler::mul(Register rd, Register rs, Register rt) {
1602 if (!IsMipsArchVariant(kMips32r6)) {
1603 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1604 } else {
1605 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1606 }
1607 }
1608
1609
// Unsigned multiply, low 32 bits to rd (R6 only).
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}


// Signed multiply, high 32 bits to rd (R6 only).
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}


// Unsigned multiply, high 32 bits to rd (R6 only).
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}


// Signed remainder to rd (R6 only).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}


// Unsigned remainder to rd (R6 only).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}


// Signed multiply into the HI/LO register pair (pre-R6 form).
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


// Unsigned multiply into the HI/LO register pair (pre-R6 form).
void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


// Signed divide into HI (remainder) / LO (quotient) (pre-R6 form).
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


// Signed divide, quotient to rd (R6 only).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}


// Unsigned divide into HI/LO (pre-R6 form).
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Unsigned divide, quotient to rd (R6 only).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
1670
1671
1672 // Logical.
1673
// Bitwise AND: rd = rs & rt.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


// Bitwise AND with a zero-extended 16-bit immediate.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


// Bitwise OR: rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


// Bitwise OR with a zero-extended 16-bit immediate.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


// Bitwise XOR: rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


// Bitwise XOR with a zero-extended 16-bit immediate.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


// Bitwise NOR: rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1710
1711
1712 // Shifts.
// Shift left logical by the 5-bit immediate sa.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
1724
1725
// Shift left logical by the amount in rs.
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


// Shift right logical by the 5-bit immediate sa.
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}


// Shift right logical by the amount in rs.
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


// Shift right arithmetic by the 5-bit immediate sa.
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}


// Shift right arithmetic by the amount in rs.
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
1749
1750
// Rotate right by the 5-bit immediate sa: the SRL encoding with bit 21
// (the 1 in the rs field) selects rotation. R2/R6 only.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


// Rotate right by the amount in rs: the SRLV encoding with bit 6 (the 1 in
// the sa field) selects rotation. R2/R6 only.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


// Left-shift-and-add (R6 only): rd = (rs << (sa + 1)) + rt per the ISA; sa
// here is the raw 2-bit field value (0..3).
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}
1779
1780
1781 // ------------Memory-instructions-------------
1782
1783 // Helper for base-reg + offset, when offset is larger than int16.
// Materializes base + full 32-bit offset into the at scratch register.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));  // at is clobbered here.
  if (IsMipsArchVariant(kMips32r6)) {
    int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
    // addiu below sign-extends the low half, so compensate in the high half
    // when the low half's sign bit is set.
    if (src.offset_ & kNegOffset) {
      hi += 1;
    }
    aui(at, src.rm(), hi);
    addiu(at, at, src.offset_ & kImm16Mask);
  } else {
    // lui/ori assemble the zero-extended 32-bit offset, so no compensation
    // is required before adding the base.
    lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
    ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
    addu(at, at, src.rm());  // Add base register.
  }
}
1799
1800 // Helper for base-reg + upper part of offset, when offset is larger than int16.
1801 // Loads higher part of the offset to AT register.
1802 // Returns lower part of the offset to be used as offset
1803 // in Load/Store instructions
// Loads base + upper offset part into at and returns the low 16 bits of the
// offset, to be used (sign-extended) as the load/store immediate.
int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));  // at is clobbered here.
  int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
  // If the highest bit of the lower part of the offset is 1, this would make
  // the offset in the load/store instruction negative. We need to compensate
  // for this by adding 1 to the upper part of the offset.
  if (src.offset_ & kNegOffset) {
    hi += 1;
  }

  if (IsMipsArchVariant(kMips32r6)) {
    // aui adds the shifted immediate to the base in one instruction.
    aui(at, src.rm(), hi);
  } else {
    lui(at, hi);
    addu(at, at, src.rm());
  }
  return (src.offset_ & kImm16Mask);
}
1822
1823 // Helper for loading base-reg + upper offset's part to AT reg when we are using
1824 // two 32-bit loads/stores instead of one 64-bit
LoadUpperOffsetForTwoMemoryAccesses(const MemOperand & src)1825 int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) {
1826 DCHECK(!src.rm().is(at));
1827 if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) {
1828 // Only if lower part of offset + kIntSize fits in 16bits
1829 return LoadRegPlusUpperOffsetPartToAt(src);
1830 }
1831 // In case offset's lower part + kIntSize doesn't fit in 16bits,
1832 // load reg + hole offset to AT
1833 LoadRegPlusOffsetToAt(src);
1834 return 0;
1835 }
1836
lb(Register rd,const MemOperand & rs)1837 void Assembler::lb(Register rd, const MemOperand& rs) {
1838 if (is_int16(rs.offset_)) {
1839 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1840 } else { // Offset > 16 bits, use multiple instructions to load.
1841 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1842 GenInstrImmediate(LB, at, rd, off16);
1843 }
1844 }
1845
1846
lbu(Register rd,const MemOperand & rs)1847 void Assembler::lbu(Register rd, const MemOperand& rs) {
1848 if (is_int16(rs.offset_)) {
1849 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1850 } else { // Offset > 16 bits, use multiple instructions to load.
1851 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1852 GenInstrImmediate(LBU, at, rd, off16);
1853 }
1854 }
1855
1856
lh(Register rd,const MemOperand & rs)1857 void Assembler::lh(Register rd, const MemOperand& rs) {
1858 if (is_int16(rs.offset_)) {
1859 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1860 } else { // Offset > 16 bits, use multiple instructions to load.
1861 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1862 GenInstrImmediate(LH, at, rd, off16);
1863 }
1864 }
1865
1866
lhu(Register rd,const MemOperand & rs)1867 void Assembler::lhu(Register rd, const MemOperand& rs) {
1868 if (is_int16(rs.offset_)) {
1869 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1870 } else { // Offset > 16 bits, use multiple instructions to load.
1871 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1872 GenInstrImmediate(LHU, at, rd, off16);
1873 }
1874 }
1875
1876
lw(Register rd,const MemOperand & rs)1877 void Assembler::lw(Register rd, const MemOperand& rs) {
1878 if (is_int16(rs.offset_)) {
1879 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1880 } else { // Offset > 16 bits, use multiple instructions to load.
1881 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1882 GenInstrImmediate(LW, at, rd, off16);
1883 }
1884 }
1885
1886
lwl(Register rd,const MemOperand & rs)1887 void Assembler::lwl(Register rd, const MemOperand& rs) {
1888 DCHECK(is_int16(rs.offset_));
1889 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1890 IsMipsArchVariant(kMips32r2));
1891 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1892 }
1893
1894
lwr(Register rd,const MemOperand & rs)1895 void Assembler::lwr(Register rd, const MemOperand& rs) {
1896 DCHECK(is_int16(rs.offset_));
1897 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1898 IsMipsArchVariant(kMips32r2));
1899 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1900 }
1901
1902
sb(Register rd,const MemOperand & rs)1903 void Assembler::sb(Register rd, const MemOperand& rs) {
1904 if (is_int16(rs.offset_)) {
1905 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1906 } else { // Offset > 16 bits, use multiple instructions to store.
1907 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1908 GenInstrImmediate(SB, at, rd, off16);
1909 }
1910 }
1911
1912
sh(Register rd,const MemOperand & rs)1913 void Assembler::sh(Register rd, const MemOperand& rs) {
1914 if (is_int16(rs.offset_)) {
1915 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1916 } else { // Offset > 16 bits, use multiple instructions to store.
1917 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1918 GenInstrImmediate(SH, at, rd, off16);
1919 }
1920 }
1921
1922
sw(Register rd,const MemOperand & rs)1923 void Assembler::sw(Register rd, const MemOperand& rs) {
1924 if (is_int16(rs.offset_)) {
1925 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1926 } else { // Offset > 16 bits, use multiple instructions to store.
1927 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
1928 GenInstrImmediate(SW, at, rd, off16);
1929 }
1930 }
1931
1932
swl(Register rd,const MemOperand & rs)1933 void Assembler::swl(Register rd, const MemOperand& rs) {
1934 DCHECK(is_int16(rs.offset_));
1935 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1936 IsMipsArchVariant(kMips32r2));
1937 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1938 }
1939
1940
swr(Register rd,const MemOperand & rs)1941 void Assembler::swr(Register rd, const MemOperand& rs) {
1942 DCHECK(is_int16(rs.offset_));
1943 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1944 IsMipsArchVariant(kMips32r2));
1945 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1946 }
1947
1948
lui(Register rd,int32_t j)1949 void Assembler::lui(Register rd, int32_t j) {
1950 DCHECK(is_uint16(j));
1951 GenInstrImmediate(LUI, zero_reg, rd, j);
1952 }
1953
1954
aui(Register rt,Register rs,int32_t j)1955 void Assembler::aui(Register rt, Register rs, int32_t j) {
1956 // This instruction uses same opcode as 'lui'. The difference in encoding is
1957 // 'lui' has zero reg. for rs field.
1958 DCHECK(!(rs.is(zero_reg)));
1959 DCHECK(is_uint16(j));
1960 GenInstrImmediate(LUI, rs, rt, j);
1961 }
1962
1963 // ---------PC-Relative instructions-----------
1964
addiupc(Register rs,int32_t imm19)1965 void Assembler::addiupc(Register rs, int32_t imm19) {
1966 DCHECK(IsMipsArchVariant(kMips32r6));
1967 DCHECK(rs.is_valid() && is_int19(imm19));
1968 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
1969 GenInstrImmediate(PCREL, rs, imm21);
1970 }
1971
1972
lwpc(Register rs,int32_t offset19)1973 void Assembler::lwpc(Register rs, int32_t offset19) {
1974 DCHECK(IsMipsArchVariant(kMips32r6));
1975 DCHECK(rs.is_valid() && is_int19(offset19));
1976 uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
1977 GenInstrImmediate(PCREL, rs, imm21);
1978 }
1979
1980
auipc(Register rs,int16_t imm16)1981 void Assembler::auipc(Register rs, int16_t imm16) {
1982 DCHECK(IsMipsArchVariant(kMips32r6));
1983 DCHECK(rs.is_valid());
1984 uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
1985 GenInstrImmediate(PCREL, rs, imm21);
1986 }
1987
1988
aluipc(Register rs,int16_t imm16)1989 void Assembler::aluipc(Register rs, int16_t imm16) {
1990 DCHECK(IsMipsArchVariant(kMips32r6));
1991 DCHECK(rs.is_valid());
1992 uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
1993 GenInstrImmediate(PCREL, rs, imm21);
1994 }
1995
1996
1997 // -------------Misc-instructions--------------
1998
1999 // Break / Trap instructions.
break_(uint32_t code,bool break_as_stop)2000 void Assembler::break_(uint32_t code, bool break_as_stop) {
2001 DCHECK((code & ~0xfffff) == 0);
2002 // We need to invalidate breaks that could be stops as well because the
2003 // simulator expects a char pointer after the stop instruction.
2004 // See constants-mips.h for explanation.
2005 DCHECK((break_as_stop &&
2006 code <= kMaxStopCode &&
2007 code > kMaxWatchpointCode) ||
2008 (!break_as_stop &&
2009 (code > kMaxStopCode ||
2010 code <= kMaxWatchpointCode)));
2011 Instr break_instr = SPECIAL | BREAK | (code << 6);
2012 emit(break_instr);
2013 }
2014
2015
stop(const char * msg,uint32_t code)2016 void Assembler::stop(const char* msg, uint32_t code) {
2017 DCHECK(code > kMaxWatchpointCode);
2018 DCHECK(code <= kMaxStopCode);
2019 #if V8_HOST_ARCH_MIPS
2020 break_(0x54321);
2021 #else // V8_HOST_ARCH_MIPS
2022 BlockTrampolinePoolFor(2);
2023 // The Simulator will handle the stop instruction and get the message address.
2024 // On MIPS stop() is just a special kind of break_().
2025 break_(code, true);
2026 // Do not embed the message string address! We used to do this, but that
2027 // made snapshots created from position-independent executable builds
2028 // non-deterministic.
2029 // TODO(yangguo): remove this field entirely.
2030 nop();
2031 #endif
2032 }
2033
2034
tge(Register rs,Register rt,uint16_t code)2035 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2036 DCHECK(is_uint10(code));
2037 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2038 | rt.code() << kRtShift | code << 6;
2039 emit(instr);
2040 }
2041
2042
tgeu(Register rs,Register rt,uint16_t code)2043 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2044 DCHECK(is_uint10(code));
2045 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2046 | rt.code() << kRtShift | code << 6;
2047 emit(instr);
2048 }
2049
2050
tlt(Register rs,Register rt,uint16_t code)2051 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2052 DCHECK(is_uint10(code));
2053 Instr instr =
2054 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2055 emit(instr);
2056 }
2057
2058
tltu(Register rs,Register rt,uint16_t code)2059 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2060 DCHECK(is_uint10(code));
2061 Instr instr =
2062 SPECIAL | TLTU | rs.code() << kRsShift
2063 | rt.code() << kRtShift | code << 6;
2064 emit(instr);
2065 }
2066
2067
teq(Register rs,Register rt,uint16_t code)2068 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2069 DCHECK(is_uint10(code));
2070 Instr instr =
2071 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2072 emit(instr);
2073 }
2074
2075
tne(Register rs,Register rt,uint16_t code)2076 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2077 DCHECK(is_uint10(code));
2078 Instr instr =
2079 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2080 emit(instr);
2081 }
2082
sync()2083 void Assembler::sync() {
2084 Instr sync_instr = SPECIAL | SYNC;
2085 emit(sync_instr);
2086 }
2087
2088 // Move from HI/LO register.
2089
mfhi(Register rd)2090 void Assembler::mfhi(Register rd) {
2091 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2092 }
2093
2094
mflo(Register rd)2095 void Assembler::mflo(Register rd) {
2096 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2097 }
2098
2099
2100 // Set on less than instructions.
slt(Register rd,Register rs,Register rt)2101 void Assembler::slt(Register rd, Register rs, Register rt) {
2102 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2103 }
2104
2105
sltu(Register rd,Register rs,Register rt)2106 void Assembler::sltu(Register rd, Register rs, Register rt) {
2107 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2108 }
2109
2110
slti(Register rt,Register rs,int32_t j)2111 void Assembler::slti(Register rt, Register rs, int32_t j) {
2112 GenInstrImmediate(SLTI, rs, rt, j);
2113 }
2114
2115
sltiu(Register rt,Register rs,int32_t j)2116 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2117 GenInstrImmediate(SLTIU, rs, rt, j);
2118 }
2119
2120
2121 // Conditional move.
movz(Register rd,Register rs,Register rt)2122 void Assembler::movz(Register rd, Register rs, Register rt) {
2123 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2124 }
2125
2126
movn(Register rd,Register rs,Register rt)2127 void Assembler::movn(Register rd, Register rs, Register rt) {
2128 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2129 }
2130
2131
movt(Register rd,Register rs,uint16_t cc)2132 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2133 Register rt;
2134 rt.reg_code = (cc & 0x0007) << 2 | 1;
2135 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2136 }
2137
2138
movf(Register rd,Register rs,uint16_t cc)2139 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2140 Register rt;
2141 rt.reg_code = (cc & 0x0007) << 2 | 0;
2142 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2143 }
2144
2145
seleqz(Register rd,Register rs,Register rt)2146 void Assembler::seleqz(Register rd, Register rs, Register rt) {
2147 DCHECK(IsMipsArchVariant(kMips32r6));
2148 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2149 }
2150
2151
2152 // Bit twiddling.
clz(Register rd,Register rs)2153 void Assembler::clz(Register rd, Register rs) {
2154 if (!IsMipsArchVariant(kMips32r6)) {
2155 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
2156 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2157 } else {
2158 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2159 }
2160 }
2161
2162
ins_(Register rt,Register rs,uint16_t pos,uint16_t size)2163 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2164 // Should be called via MacroAssembler::Ins.
2165 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2166 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2167 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2168 }
2169
2170
ext_(Register rt,Register rs,uint16_t pos,uint16_t size)2171 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2172 // Should be called via MacroAssembler::Ext.
2173 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2174 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2175 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2176 }
2177
2178
bitswap(Register rd,Register rt)2179 void Assembler::bitswap(Register rd, Register rt) {
2180 DCHECK(IsMipsArchVariant(kMips32r6));
2181 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2182 }
2183
2184
pref(int32_t hint,const MemOperand & rs)2185 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2186 DCHECK(!IsMipsArchVariant(kLoongson));
2187 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2188 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2189 | (rs.offset_);
2190 emit(instr);
2191 }
2192
2193
align(Register rd,Register rs,Register rt,uint8_t bp)2194 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2195 DCHECK(IsMipsArchVariant(kMips32r6));
2196 DCHECK(is_uint3(bp));
2197 uint16_t sa = (ALIGN << kBp2Bits) | bp;
2198 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2199 }
2200
2201 // Byte swap.
wsbh(Register rd,Register rt)2202 void Assembler::wsbh(Register rd, Register rt) {
2203 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2204 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2205 }
2206
seh(Register rd,Register rt)2207 void Assembler::seh(Register rd, Register rt) {
2208 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2209 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2210 }
2211
seb(Register rd,Register rt)2212 void Assembler::seb(Register rd, Register rt) {
2213 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2214 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2215 }
2216
2217 // --------Coprocessor-instructions----------------
2218
2219 // Load, store, move.
lwc1(FPURegister fd,const MemOperand & src)2220 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2221 if (is_int16(src.offset_)) {
2222 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2223 } else { // Offset > 16 bits, use multiple instructions to load.
2224 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
2225 GenInstrImmediate(LWC1, at, fd, off16);
2226 }
2227 }
2228
2229
ldc1(FPURegister fd,const MemOperand & src)2230 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2231 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
2232 // load to two 32-bit loads.
2233 if (IsFp32Mode()) { // fp32 mode.
2234 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2235 GenInstrImmediate(LWC1, src.rm(), fd,
2236 src.offset_ + Register::kMantissaOffset);
2237 FPURegister nextfpreg;
2238 nextfpreg.setcode(fd.code() + 1);
2239 GenInstrImmediate(LWC1, src.rm(), nextfpreg,
2240 src.offset_ + Register::kExponentOffset);
2241 } else { // Offset > 16 bits, use multiple instructions to load.
2242 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2243 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
2244 FPURegister nextfpreg;
2245 nextfpreg.setcode(fd.code() + 1);
2246 GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset);
2247 }
2248 } else {
2249 DCHECK(IsFp64Mode() || IsFpxxMode());
2250 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2251 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2252 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2253 GenInstrImmediate(LWC1, src.rm(), fd,
2254 src.offset_ + Register::kMantissaOffset);
2255 GenInstrImmediate(LW, src.rm(), at,
2256 src.offset_ + Register::kExponentOffset);
2257 mthc1(at, fd);
2258 } else { // Offset > 16 bits, use multiple instructions to load.
2259 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2260 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
2261 GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset);
2262 mthc1(at, fd);
2263 }
2264 }
2265 }
2266
2267
swc1(FPURegister fd,const MemOperand & src)2268 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2269 if (is_int16(src.offset_)) {
2270 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2271 } else { // Offset > 16 bits, use multiple instructions to load.
2272 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
2273 GenInstrImmediate(SWC1, at, fd, off16);
2274 }
2275 }
2276
2277
sdc1(FPURegister fd,const MemOperand & src)2278 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
2279 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
2280 // store to two 32-bit stores.
2281 DCHECK(!src.rm().is(at));
2282 DCHECK(!src.rm().is(t8));
2283 if (IsFp32Mode()) { // fp32 mode.
2284 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2285 GenInstrImmediate(SWC1, src.rm(), fd,
2286 src.offset_ + Register::kMantissaOffset);
2287 FPURegister nextfpreg;
2288 nextfpreg.setcode(fd.code() + 1);
2289 GenInstrImmediate(SWC1, src.rm(), nextfpreg,
2290 src.offset_ + Register::kExponentOffset);
2291 } else { // Offset > 16 bits, use multiple instructions to load.
2292 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2293 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
2294 FPURegister nextfpreg;
2295 nextfpreg.setcode(fd.code() + 1);
2296 GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset);
2297 }
2298 } else {
2299 DCHECK(IsFp64Mode() || IsFpxxMode());
2300 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2301 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2302 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2303 GenInstrImmediate(SWC1, src.rm(), fd,
2304 src.offset_ + Register::kMantissaOffset);
2305 mfhc1(at, fd);
2306 GenInstrImmediate(SW, src.rm(), at,
2307 src.offset_ + Register::kExponentOffset);
2308 } else { // Offset > 16 bits, use multiple instructions to load.
2309 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
2310 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
2311 mfhc1(t8, fd);
2312 GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset);
2313 }
2314 }
2315 }
2316
2317
mtc1(Register rt,FPURegister fs)2318 void Assembler::mtc1(Register rt, FPURegister fs) {
2319 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2320 }
2321
2322
mthc1(Register rt,FPURegister fs)2323 void Assembler::mthc1(Register rt, FPURegister fs) {
2324 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2325 }
2326
2327
mfc1(Register rt,FPURegister fs)2328 void Assembler::mfc1(Register rt, FPURegister fs) {
2329 GenInstrRegister(COP1, MFC1, rt, fs, f0);
2330 }
2331
2332
mfhc1(Register rt,FPURegister fs)2333 void Assembler::mfhc1(Register rt, FPURegister fs) {
2334 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2335 }
2336
2337
ctc1(Register rt,FPUControlRegister fs)2338 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2339 GenInstrRegister(COP1, CTC1, rt, fs);
2340 }
2341
2342
cfc1(Register rt,FPUControlRegister fs)2343 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2344 GenInstrRegister(COP1, CFC1, rt, fs);
2345 }
2346
2347
DoubleAsTwoUInt32(double d,uint32_t * lo,uint32_t * hi)2348 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2349 uint64_t i;
2350 memcpy(&i, &d, 8);
2351
2352 *lo = i & 0xffffffff;
2353 *hi = i >> 32;
2354 }
2355
2356
movn_s(FPURegister fd,FPURegister fs,Register rt)2357 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
2358 DCHECK(!IsMipsArchVariant(kMips32r6));
2359 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
2360 }
2361
2362
movn_d(FPURegister fd,FPURegister fs,Register rt)2363 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
2364 DCHECK(!IsMipsArchVariant(kMips32r6));
2365 GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
2366 }
2367
2368
sel(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2369 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2370 FPURegister ft) {
2371 DCHECK(IsMipsArchVariant(kMips32r6));
2372 DCHECK((fmt == D) || (fmt == S));
2373
2374 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2375 }
2376
2377
sel_s(FPURegister fd,FPURegister fs,FPURegister ft)2378 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2379 sel(S, fd, fs, ft);
2380 }
2381
2382
sel_d(FPURegister fd,FPURegister fs,FPURegister ft)2383 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2384 sel(D, fd, fs, ft);
2385 }
2386
2387
seleqz(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2388 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2389 FPURegister ft) {
2390 DCHECK(IsMipsArchVariant(kMips32r6));
2391 DCHECK((fmt == D) || (fmt == S));
2392 GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2393 }
2394
2395
selnez(Register rd,Register rs,Register rt)2396 void Assembler::selnez(Register rd, Register rs, Register rt) {
2397 DCHECK(IsMipsArchVariant(kMips32r6));
2398 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2399 }
2400
2401
selnez(SecondaryField fmt,FPURegister fd,FPURegister fs,FPURegister ft)2402 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
2403 FPURegister ft) {
2404 DCHECK(IsMipsArchVariant(kMips32r6));
2405 DCHECK((fmt == D) || (fmt == S));
2406 GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
2407 }
2408
2409
seleqz_d(FPURegister fd,FPURegister fs,FPURegister ft)2410 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2411 seleqz(D, fd, fs, ft);
2412 }
2413
2414
seleqz_s(FPURegister fd,FPURegister fs,FPURegister ft)2415 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2416 seleqz(S, fd, fs, ft);
2417 }
2418
2419
selnez_d(FPURegister fd,FPURegister fs,FPURegister ft)2420 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2421 selnez(D, fd, fs, ft);
2422 }
2423
2424
selnez_s(FPURegister fd,FPURegister fs,FPURegister ft)2425 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2426 selnez(S, fd, fs, ft);
2427 }
2428
2429
movz_s(FPURegister fd,FPURegister fs,Register rt)2430 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
2431 DCHECK(!IsMipsArchVariant(kMips32r6));
2432 GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
2433 }
2434
2435
movz_d(FPURegister fd,FPURegister fs,Register rt)2436 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
2437 DCHECK(!IsMipsArchVariant(kMips32r6));
2438 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
2439 }
2440
2441
movt_s(FPURegister fd,FPURegister fs,uint16_t cc)2442 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2443 DCHECK(!IsMipsArchVariant(kMips32r6));
2444 FPURegister ft;
2445 ft.reg_code = (cc & 0x0007) << 2 | 1;
2446 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2447 }
2448
2449
movt_d(FPURegister fd,FPURegister fs,uint16_t cc)2450 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2451 DCHECK(!IsMipsArchVariant(kMips32r6));
2452 FPURegister ft;
2453 ft.reg_code = (cc & 0x0007) << 2 | 1;
2454 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2455 }
2456
2457
movf_s(FPURegister fd,FPURegister fs,uint16_t cc)2458 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2459 DCHECK(!IsMipsArchVariant(kMips32r6));
2460 FPURegister ft;
2461 ft.reg_code = (cc & 0x0007) << 2 | 0;
2462 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2463 }
2464
2465
movf_d(FPURegister fd,FPURegister fs,uint16_t cc)2466 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2467 DCHECK(!IsMipsArchVariant(kMips32r6));
2468 FPURegister ft;
2469 ft.reg_code = (cc & 0x0007) << 2 | 0;
2470 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2471 }
2472
2473
2474 // Arithmetic.
2475
add_s(FPURegister fd,FPURegister fs,FPURegister ft)2476 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2477 GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
2478 }
2479
2480
add_d(FPURegister fd,FPURegister fs,FPURegister ft)2481 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2482 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2483 }
2484
2485
sub_s(FPURegister fd,FPURegister fs,FPURegister ft)2486 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2487 GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
2488 }
2489
2490
sub_d(FPURegister fd,FPURegister fs,FPURegister ft)2491 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2492 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2493 }
2494
2495
mul_s(FPURegister fd,FPURegister fs,FPURegister ft)2496 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2497 GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
2498 }
2499
2500
mul_d(FPURegister fd,FPURegister fs,FPURegister ft)2501 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2502 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2503 }
2504
madd_s(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2505 void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2506 FPURegister ft) {
2507 DCHECK(IsMipsArchVariant(kMips32r2));
2508 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
2509 }
2510
madd_d(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2511 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2512 FPURegister ft) {
2513 DCHECK(IsMipsArchVariant(kMips32r2));
2514 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2515 }
2516
msub_s(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2517 void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2518 FPURegister ft) {
2519 DCHECK(IsMipsArchVariant(kMips32r2));
2520 GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
2521 }
2522
msub_d(FPURegister fd,FPURegister fr,FPURegister fs,FPURegister ft)2523 void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2524 FPURegister ft) {
2525 DCHECK(IsMipsArchVariant(kMips32r2));
2526 GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
2527 }
2528
maddf_s(FPURegister fd,FPURegister fs,FPURegister ft)2529 void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2530 DCHECK(IsMipsArchVariant(kMips32r6));
2531 GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
2532 }
2533
maddf_d(FPURegister fd,FPURegister fs,FPURegister ft)2534 void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2535 DCHECK(IsMipsArchVariant(kMips32r6));
2536 GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
2537 }
2538
msubf_s(FPURegister fd,FPURegister fs,FPURegister ft)2539 void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2540 DCHECK(IsMipsArchVariant(kMips32r6));
2541 GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
2542 }
2543
msubf_d(FPURegister fd,FPURegister fs,FPURegister ft)2544 void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2545 DCHECK(IsMipsArchVariant(kMips32r6));
2546 GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
2547 }
2548
div_s(FPURegister fd,FPURegister fs,FPURegister ft)2549 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2550 GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
2551 }
2552
2553
div_d(FPURegister fd,FPURegister fs,FPURegister ft)2554 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2555 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2556 }
2557
2558
abs_s(FPURegister fd,FPURegister fs)2559 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2560 GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
2561 }
2562
2563
abs_d(FPURegister fd,FPURegister fs)2564 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2565 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2566 }
2567
2568
mov_d(FPURegister fd,FPURegister fs)2569 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2570 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2571 }
2572
2573
mov_s(FPURegister fd,FPURegister fs)2574 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2575 GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2576 }
2577
2578
neg_s(FPURegister fd,FPURegister fs)2579 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2580 GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
2581 }
2582
2583
neg_d(FPURegister fd,FPURegister fs)2584 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2585 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2586 }
2587
2588
sqrt_s(FPURegister fd,FPURegister fs)2589 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2590 GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
2591 }
2592
2593
sqrt_d(FPURegister fd,FPURegister fs)2594 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2595 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2596 }
2597
2598
rsqrt_s(FPURegister fd,FPURegister fs)2599 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2600 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2601 GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2602 }
2603
2604
rsqrt_d(FPURegister fd,FPURegister fs)2605 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2606 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2607 GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2608 }
2609
2610
recip_d(FPURegister fd,FPURegister fs)2611 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2612 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2613 GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2614 }
2615
2616
recip_s(FPURegister fd,FPURegister fs)2617 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2618 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2619 GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2620 }
2621
2622
2623 // Conversions.
2624
cvt_w_s(FPURegister fd,FPURegister fs)2625 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2626 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2627 }
2628
2629
cvt_w_d(FPURegister fd,FPURegister fs)2630 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2631 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2632 }
2633
2634
trunc_w_s(FPURegister fd,FPURegister fs)2635 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2636 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2637 }
2638
2639
trunc_w_d(FPURegister fd,FPURegister fs)2640 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2641 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2642 }
2643
2644
round_w_s(FPURegister fd,FPURegister fs)2645 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2646 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2647 }
2648
2649
round_w_d(FPURegister fd,FPURegister fs)2650 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2651 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2652 }
2653
2654
floor_w_s(FPURegister fd,FPURegister fs)2655 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2656 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2657 }
2658
2659
floor_w_d(FPURegister fd,FPURegister fs)2660 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2661 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2662 }
2663
2664
ceil_w_s(FPURegister fd,FPURegister fs)2665 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2666 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2667 }
2668
2669
ceil_w_d(FPURegister fd,FPURegister fs)2670 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2671 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2672 }
2673
2674
rint_s(FPURegister fd,FPURegister fs)2675 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2676
2677
// rint.fmt fd, fs: round fs to an integral FP value in fd using the current
// FCSR rounding mode. r6-only instruction; fmt must be S or D.
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
2683
2684
// rint.D fd, fs: round double to integral value (r6-only; checked in rint()).
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2686
2687
// cvt.l.s fd, fs: convert single in fs to a 64-bit long in fd.
// Requires r2/r6 and FP64 mode since the result occupies a 64-bit FPR.
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
2693
2694
// cvt.l.d fd, fs: convert double in fs to a 64-bit long in fd.
// Requires r2/r6 and FP64 mode since the result occupies a 64-bit FPR.
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
2700
2701
// trunc.l.s fd, fs: single -> 64-bit long, rounding toward zero.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
2707
2708
// trunc.l.d fd, fs: double -> 64-bit long, rounding toward zero.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
2714
2715
// round.l.s fd, fs: single -> 64-bit long, rounding to nearest.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
2721
2722
// round.l.d fd, fs: double -> 64-bit long, rounding to nearest.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
2728
2729
// floor.l.s fd, fs: single -> 64-bit long, rounding toward -inf.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
2735
2736
// floor.l.d fd, fs: double -> 64-bit long, rounding toward -inf.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
2742
2743
// ceil.l.s fd, fs: single -> 64-bit long, rounding toward +inf.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
2749
2750
// ceil.l.d fd, fs: double -> 64-bit long, rounding toward +inf.
// Requires r2/r6 and FP64 mode (64-bit result register).
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2756
2757
// class.s fd, fs: classify the single in fs (NaN/inf/zero/subnormal bits)
// into fd. r6-only instruction.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
2762
2763
// class.d fd, fs: classify the double in fs (NaN/inf/zero/subnormal bits)
// into fd. r6-only instruction.
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
2768
2769
// min.fmt fd, fs, ft: IEEE 754-2008 minimum of fs and ft into fd.
// r6-only; fmt must be S or D.
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
2776
2777
// mina.fmt fd, fs, ft: value with the smaller absolute magnitude into fd.
// r6-only; fmt must be S or D.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
2784
2785
// max.fmt fd, fs, ft: IEEE 754-2008 maximum of fs and ft into fd.
// r6-only; fmt must be S or D.
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}
2792
2793
// maxa.fmt fd, fs, ft: value with the larger absolute magnitude into fd.
// r6-only; fmt must be S or D.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
2800
2801
// min.s fd, fs, ft: single-precision form of min().
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}
2805
2806
// min.d fd, fs, ft: double-precision form of min().
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}
2810
2811
// max.s fd, fs, ft: single-precision form of max().
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}
2815
2816
// max.d fd, fs, ft: double-precision form of max().
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}
2820
2821
// mina.s fd, fs, ft: single-precision form of mina().
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}
2825
2826
// mina.d fd, fs, ft: double-precision form of mina().
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}
2830
2831
// maxa.s fd, fs, ft: single-precision form of maxa().
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}
2835
2836
// maxa.d fd, fs, ft: double-precision form of maxa().
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
2840
2841
// cvt.s.w fd, fs: convert 32-bit word in fs to single in fd.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
2845
2846
// cvt.s.l fd, fs: convert 64-bit long in fs to single in fd.
// Requires r2/r6 and FP64 mode (64-bit source register).
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
2852
2853
// cvt.s.d fd, fs: convert double in fs to single in fd.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
2857
2858
// cvt.d.w fd, fs: convert 32-bit word in fs to double in fd.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
2862
2863
// cvt.d.l fd, fs: convert 64-bit long in fs to double in fd.
// Requires r2/r6 and FP64 mode (64-bit source register).
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
         IsFp64Mode());
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
2869
2870
// cvt.d.s fd, fs: convert single in fs to double in fd.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
2874
2875
2876 // Conditions for >= MIPSr6.
// cmp.cond.fmt fd, fs, ft: r6 FP compare; writes all-ones/all-zeros mask
// into fd instead of setting a condition-code bit (pre-r6 c.cond.fmt).
// The fmt value must fit entirely in the rs field (checked below).
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
                    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}
2885
2886
// Single-precision r6 compare. NOTE(review): W (not S) appears to be the
// fmt field value the CMP.cond.S encoding uses — verify against the r6 ISA
// manual before changing.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}
2891
// Double-precision r6 compare. NOTE(review): L (not D) appears to be the
// fmt field value the CMP.cond.D encoding uses — verify against the r6 ISA
// manual before changing.
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
2896
2897
// bc1eqz offset, ft: r6 branch if bit 0 of FPR ft is zero.
// offset is a signed 16-bit instruction offset placed in the imm16 field.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
2903
2904
// bc1nez offset, ft: r6 branch if bit 0 of FPR ft is non-zero.
// offset is a signed 16-bit instruction offset placed in the imm16 field.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
2910
2911
2912 // Conditions for < MIPSr6.
c(FPUCondition cond,SecondaryField fmt,FPURegister fs,FPURegister ft,uint16_t cc)2913 void Assembler::c(FPUCondition cond, SecondaryField fmt,
2914 FPURegister fs, FPURegister ft, uint16_t cc) {
2915 DCHECK(is_uint3(cc));
2916 DCHECK(fmt == S || fmt == D);
2917 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2918 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
2919 | cc << 8 | 3 << 4 | cond;
2920 emit(instr);
2921 }
2922
2923
// c.cond.s fs, ft (cc): single-precision form of c().
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}
2928
2929
// c.cond.d fs, ft (cc): double-precision form of c().
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
2934
2935
// Compares src1 against src2 (which must be 0.0) with pre-r6 c.cond.d,
// using condition code 0. Clobbers f14: zero is materialized there via
// mtc1 from zero_reg followed by a word->double conversion.
void Assembler::fcmp(FPURegister src1, const double src2,
                     FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}
2943
2944
// bc1f offset (cc): pre-r6 branch if condition-code bit cc is false.
// cc occupies bits 20..18; bit 16 = 0 selects the branch-on-false form.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}
2950
2951
// bc1t offset (cc): pre-r6 branch if condition-code bit cc is true.
// cc occupies bits 20..18; bit 16 = 1 selects the branch-on-true form.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
2957
2958
// Adjusts one internal reference at pc by pc_delta after the code buffer
// moved. Two encodings are handled:
//  - INTERNAL_REFERENCE: a raw 32-bit address stored in the instruction
//    stream; patched in place (0 marks an unset reference).
//  - INTERNAL_REFERENCE_ENCODED: an address split across a lui/ori pair
//    (or lui/jic on r6); both instructions are re-patched.
// Returns the number of instructions patched (0, 1, or 2).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  Instr instr = instr_at(pc);

  if (RelocInfo::IsInternalReference(rmode)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc);
    if (*p == 0) {
      // Unset reference: nothing to relocate.
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
    if (IsLui(instr)) {
      Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
      Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
      // Reassemble the 32-bit target from the two 16-bit halves.
      int32_t imm;
      if (IsJicOrJialc(instr2)) {
        imm = CreateTargetAddress(instr1, instr2);
      } else {
        imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
        imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
      }

      if (imm == kEndOfJumpChain) {
        // End-of-chain sentinel: leave untouched.
        return 0;  // Number of instructions patched.
      }
      imm += pc_delta;
      DCHECK((imm & 3) == 0);  // Targets are always 4-byte aligned.
      // Clear the immediate fields before OR-ing in the new halves.
      instr1 &= ~kImm16Mask;
      instr2 &= ~kImm16Mask;

      if (IsJicOrJialc(instr2)) {
        uint32_t lui_offset_u, jic_offset_u;
        Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
        instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
        instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
      } else {
        instr_at_put(pc + 0 * Assembler::kInstrSize,
                     instr1 | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pc + 1 * Assembler::kInstrSize,
                     instr2 | (imm & kImm16Mask));
      }
      return 2;  // Number of instructions patched.
    } else {
      UNREACHABLE();
      return 0;
    }
  }
}
3010
3011
// Grows the code buffer when it is (nearly) full: allocates a larger
// buffer, moves the generated code and the reloc info (which grows
// downward from the buffer end), then fixes up internal references that
// point into the moved code.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: double below 1MB, then grow linearly by 1MB.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions copy from the front; reloc info keeps its
  // distance from the buffer end, hence the separate rc_delta.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries: internal references embed absolute positions
  // in the old buffer and must be shifted by pc_delta.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
3057
3058
// Emits a raw data byte into the instruction stream (not an instruction).
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3063
3064
// Emits a raw 32-bit data word into the instruction stream.
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3069
3070
// Emits a raw 64-bit data word into the instruction stream.
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3075
3076
// Emits the address of a label as a 32-bit word with INTERNAL_REFERENCE
// reloc info. For a bound label the absolute buffer address is emitted;
// for an unbound one a jump-chain entry is emitted and the position is
// remembered so it can be patched when the label is bound.
void Assembler::dd(Label* label) {
  uint32_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3090
3091
// Writes a reloc-info entry for the instruction at the current pc.
// External references are skipped unless serializing or emitting debug
// code; CODE_TARGET_WITH_ID entries additionally carry the recorded
// AST id as their data payload.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      // Replace the plain entry with one carrying the AST id, then clear
      // the recorded id so it is not attached to a later entry.
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3118
3119
// Prevents trampoline-pool emission for the next `instructions`
// instructions. The pool is given a chance to be emitted first, so it is
// not starved by back-to-back blocked regions.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
3124
3125
// Emits the trampoline pool (one long-jump slot per unbound label) if
// emission is not currently blocked. The pool is emitted at most once;
// afterwards next_buffer_check_ is pushed to kMaxInt.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      // Straight-line code must jump over the pool it cannot fall into.
      Label after_pool;
      if (IsMipsArchVariant(kMips32r6)) {
        bc(&after_pool);
      } else {
        b(&after_pool);
        nop();  // Branch delay slot.
      }

      int pool_start = pc_offset();
      if (IsMipsArchVariant(kMips32r6)) {
        // r6 slot: lui/jic pair holding a full 32-bit jump target.
        for (int i = 0; i < unbound_labels_count_; i++) {
          uint32_t imm32;
          imm32 = jump_address(&after_pool);
          uint32_t lui_offset, jic_offset;
          UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
          {
            BlockGrowBufferScope block_buf_growth(this);
            // Buffer growth (and relocation) must be blocked for internal
            // references until associated instructions are emitted and
            // available to be patched.
            RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
            lui(at, lui_offset);
            jic(at, jic_offset);
          }
          CheckBuffer();
        }
      } else {
        // Pre-r6 slot: lui/ori to build the target, then jr + delay nop.
        for (int i = 0; i < unbound_labels_count_; i++) {
          uint32_t imm32;
          imm32 = jump_address(&after_pool);
          {
            BlockGrowBufferScope block_buf_growth(this);
            // Buffer growth (and relocation) must be blocked for internal
            // references until associated instructions are emitted and
            // available to be patched.
            RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
            lui(at, (imm32 & kHiMask) >> kLuiShift);
            ori(at, at, (imm32 & kImm16Mask));
          }
          CheckBuffer();
          jr(at);
          nop();  // Branch delay slot.
        }
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3209
3210
// Reads the 32-bit target address encoded in a lui/ori pair at pc
// (the sequence produced by the li macro-instruction). Any other
// instruction sequence is a bug.
Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret 2 instructions generated by li: lui/ori
  if (IsLui(instr1) && IsOri(instr2)) {
    // Assemble the 32 bit value.
    return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
                                     GetImmediate16(instr2));
  }

  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
3225
3226
3227 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
3228 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
3229 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
3230 // OS::nan_value() returns a qNaN.
// Overwrites the HeapNumber's value with a platform qNaN, quieting a
// signaling NaN (see the cross-platform NaN-encoding note above).
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
3234
3235
3236 // On Mips, a target address is stored in a lui/ori instruction pair, each
3237 // of which load 16 bits of the 32-bit address to a register.
3238 // Patching the address must replace both instr, and flush the i-cache.
3239 // On r6, target address is stored in a lui/jic pair, and both instr have to be
3240 // patched.
3241 //
3242 // There is an optimization below, which emits a nop when the address
3243 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
3244 // and possibly removed.
// Patches the two-instruction address-load at pc (lui/ori, or lui/jic on
// r6) to load `target`, then flushes the icache for the patched region
// unless the caller asked to skip the flush.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
#endif

  if (IsJicOrJialc(instr2)) {
    // Must use 2 instructions to insure patchable code => use lui and jic
    uint32_t lui_offset, jic_offset;
    Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);

    // Clear both 16-bit immediate fields, then OR in the new halves.
    *p &= ~kImm16Mask;
    *(p + 1) &= ~kImm16Mask;

    *p |= lui_offset;
    *(p + 1) |= jic_offset;

  } else {
    // Must use 2 instructions to insure patchable code => just use lui and ori.
    // lui rt, upper-16.
    // ori rt rt, lower-16.
    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
    *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
  }

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
  }
}
3282
3283 } // namespace internal
3284 } // namespace v8
3285
3286 #endif // V8_TARGET_ARCH_MIPS
3287