// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/mips/assembler-mips.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation, the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}

Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}

void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}

void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
                                                  ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

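// For reference, the patterns below use the standard MIPS32 I-type layout:
//   | opcode (6 bits) | rs (5) | rt (5) | imm16 (16) |
// e.g. kPushInstruction is ADDIU with rs = rt = sp and imm16 = -kPointerSize,
// i.e. the pre-decrement "addiu sp, sp, -4" that starts a Push.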
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
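// Note: kLwSwInstrTypeMask (0xffe00000) selects the opcode and rs fields
// (bits 31..21), so matching against kLwRegFpOffsetPattern and friends checks
// "lw/sw with base register fp" while ignoring rt and the offset.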


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value of -1 in the instruction's offset
// field, which is an otherwise illegal value (a branch to -1 is an infinite
// loop). The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the -4 value.
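// For example, a link field holding -1 in the 16-bit word-offset field
// corresponds to a byte offset of -1 << 2 = -4, which is why kEndOfChain
// below is -4.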

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}


bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}


bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsJicOrJialc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  return (opcode == POP66 || opcode == POP76) && rs == 0;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  if (!IsMipsArchVariant(kMips32r6)) {
    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
  } else {
    return GetOpcodeField(instr) == SPECIAL &&
           GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
  }
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL &&
         GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (ori).
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.
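  // For example, a type-1 marker nop is sll(zero_reg, at, 1): rd == zero_reg,
  // rt == at and sa == 1, which is exactly what the check below matches.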

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (IsMipsArchVariant(kMips32r6)) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}


static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);
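  // e.g. for a 16-bit offset field, bits == 16 at this point: the field is
  // shifted up to the sign bit and back down by 14, which sign-extends the
  // word offset and scales it to bytes (an implicit << 2) in one step.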

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}

uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
  int16_t jic_offset = GetImmediate16(instr_jic);
  int16_t lui_offset = GetImmediate16(instr_lui);

  if (jic_offset < 0) {
    lui_offset += kImm16Mask;
  }
  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;

  return lui_offset_u | jic_offset_u;
}
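// Worked example (assumed values): for a pair "lui 0x1235 / jic 0x8000", the
// jic offset is negative as an int16_t, and the += kImm16Mask above acts as
// a -1 modulo 2^16, so lui_offset becomes 0x1234 and the result is
// 0x1234 << 16 | 0x8000 == 0x12348000 -- the address the pair jumps to.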

// Use just lui and jic instructions. Insert the lower part of the target
// address into the jic offset field. Since jic sign-extends its offset and
// then adds it to a register, the lui instruction must load into that
// register the difference between the upper part of the target address and
// the upper part of the sign-extended offset (0xffff or 0x0000).
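// Worked example (assumed address 0x12348000): jic gets the low half 0x8000,
// which sign-extends to 0xffff8000 at runtime, so the -= kImm16Mask below
// (a +1 modulo 2^16) bumps the lui half from 0x1234 to 0x1235;
// 0x12350000 + 0xffff8000 then reproduces the original address.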
void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
                                    int16_t& jic_offset) {
  lui_offset = (address & kHiMask) >> kLuiShift;
  jic_offset = address & kLoMask;

  if (jic_offset < 0) {
    lui_offset -= kImm16Mask;
  }
}

void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
                                            uint32_t& lui_offset,
                                            uint32_t& jic_offset) {
  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
  int16_t jic_offset16 = address & kLoMask;

  if (jic_offset16 < 0) {
    lui_offset16 -= kImm16Mask;
  }
  lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
  jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}

int Assembler::target_at(int pos, bool is_internal) {
  Instr instr = instr_at(pos);
  if (is_internal) {
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int delta = static_cast<int>(instr_address - instr);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else {
    Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
    int32_t imm;
    if (IsJicOrJialc(instr2)) {
      imm = CreateTargetAddress(instr1, instr2);
    } else {
      imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
    }

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  return 0;
}


static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                              bool is_internal) {
  Instr instr = instr_at(pos);

  if (is_internal) {
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    instr_at_put(pos, imm);
    return;
  }
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else {
    Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);
    DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
    instr1 &= ~kImm16Mask;
    instr2 &= ~kImm16Mask;

    if (IsJicOrJialc(instr2)) {
      uint32_t lui_offset_u, jic_offset_u;
      UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
      instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
      instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
    } else {
      instr_at_put(pos + 0 * Assembler::kInstrSize,
                   instr1 | ((imm & kHiMask) >> kLuiShift));
      instr_at_put(pos + 1 * Assembler::kInstrSize,
                   instr2 | (imm & kImm16Mask));
    }
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
               internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}


bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
}


bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
}


int Assembler::BranchOffset(Instr instr) {
  // At pre-R6 and for other R6 branches the offset is 16 bits.
  int bits = OffsetSize::kOffset16;

  if (IsMipsArchVariant(kMips32r6)) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
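// For example, for a 16-bit branch (assuming OffsetSize::kOffset16 == 16)
// BranchOffset returns (1 << 17) - 1 == 131071, an upper bound in bytes on
// the distance a sign-extended, word-scaled 16-bit offset can span.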


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK((offset & 3) == 0);

  return offset;
}
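// Note: when the previous instruction is a compact branch, the emitter
// presumably inserts a padding nop before this one (a compact branch must not
// occupy the forbidden slot of another), which is what the 'pad' term above
// accounts for when linking and computing the offset.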


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}


void Assembler::bc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::balc(int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
1530 BlockTrampolinePoolScope block_trampoline_pool(this);
1531 GenInstrJump(J, (target >> 2) & kImm26Mask);
1532 BlockTrampolinePoolFor(1); // For associated delay slot.
1533 }
1534
1535
jr(Register rs)1536 void Assembler::jr(Register rs) {
1537 if (!IsMipsArchVariant(kMips32r6)) {
1538 BlockTrampolinePoolScope block_trampoline_pool(this);
1539 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1540 BlockTrampolinePoolFor(1); // For associated delay slot.
1541 } else {
1542 jalr(rs, zero_reg);
1543 }
1544 }
1545
1546
jal(int32_t target)1547 void Assembler::jal(int32_t target) {
1548 #ifdef DEBUG
1549 // Get pc of delay slot.
1550 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1551 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1552 (kImm26Bits + kImmFieldShift)) == 0;
1553 DCHECK(in_range && ((target & 3) == 0));
1554 #endif
1555 BlockTrampolinePoolScope block_trampoline_pool(this);
1556 GenInstrJump(JAL, (target >> 2) & kImm26Mask);
1557 BlockTrampolinePoolFor(1); // For associated delay slot.
1558 }
1559
1560
jalr(Register rs,Register rd)1561 void Assembler::jalr(Register rs, Register rd) {
1562 DCHECK(rs.code() != rd.code());
1563 BlockTrampolinePoolScope block_trampoline_pool(this);
1564 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1565 BlockTrampolinePoolFor(1); // For associated delay slot.
1566 }
1567
1568
jic(Register rt,int16_t offset)1569 void Assembler::jic(Register rt, int16_t offset) {
1570 DCHECK(IsMipsArchVariant(kMips32r6));
1571 GenInstrImmediate(POP66, zero_reg, rt, offset);
1572 }
1573
1574
jialc(Register rt,int16_t offset)1575 void Assembler::jialc(Register rt, int16_t offset) {
1576 DCHECK(IsMipsArchVariant(kMips32r6));
1577 GenInstrImmediate(POP76, zero_reg, rt, offset);
1578 }
1579
1580
1581 // -------Data-processing-instructions---------
1582
1583 // Arithmetic.
1584
addu(Register rd,Register rs,Register rt)1585 void Assembler::addu(Register rd, Register rs, Register rt) {
1586 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1587 }
1588
1589
addiu(Register rd,Register rs,int32_t j)1590 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1591 GenInstrImmediate(ADDIU, rs, rd, j);
1592 }
1593
1594
subu(Register rd,Register rs,Register rt)1595 void Assembler::subu(Register rd, Register rs, Register rt) {
1596 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1597 }
1598
1599
mul(Register rd,Register rs,Register rt)1600 void Assembler::mul(Register rd, Register rs, Register rt) {
1601 if (!IsMipsArchVariant(kMips32r6)) {
1602 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1603 } else {
1604 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1605 }
1606 }
1607
1608
mulu(Register rd,Register rs,Register rt)1609 void Assembler::mulu(Register rd, Register rs, Register rt) {
1610 DCHECK(IsMipsArchVariant(kMips32r6));
1611 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1612 }
1613
1614
muh(Register rd,Register rs,Register rt)1615 void Assembler::muh(Register rd, Register rs, Register rt) {
1616 DCHECK(IsMipsArchVariant(kMips32r6));
1617 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1618 }
1619
1620
muhu(Register rd,Register rs,Register rt)1621 void Assembler::muhu(Register rd, Register rs, Register rt) {
1622 DCHECK(IsMipsArchVariant(kMips32r6));
1623 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1624 }
1625
1626
mod(Register rd,Register rs,Register rt)1627 void Assembler::mod(Register rd, Register rs, Register rt) {
1628 DCHECK(IsMipsArchVariant(kMips32r6));
1629 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1630 }
1631
1632
modu(Register rd,Register rs,Register rt)1633 void Assembler::modu(Register rd, Register rs, Register rt) {
1634 DCHECK(IsMipsArchVariant(kMips32r6));
1635 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1636 }
1637
1638
mult(Register rs,Register rt)1639 void Assembler::mult(Register rs, Register rt) {
1640 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1641 }
1642
1643
multu(Register rs,Register rt)1644 void Assembler::multu(Register rs, Register rt) {
1645 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1646 }
1647
1648
div(Register rs,Register rt)1649 void Assembler::div(Register rs, Register rt) {
1650 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1651 }
1652
1653
div(Register rd,Register rs,Register rt)1654 void Assembler::div(Register rd, Register rs, Register rt) {
1655 DCHECK(IsMipsArchVariant(kMips32r6));
1656 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1657 }
1658
1659
1660 void Assembler::divu(Register rs, Register rt) {
1661 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1662 }
1663
1664
1665 void Assembler::divu(Register rd, Register rs, Register rt) {
1666 DCHECK(IsMipsArchVariant(kMips32r6));
1667 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1668 }
1669
1670
1671 // Logical.
1672
1673 void Assembler::and_(Register rd, Register rs, Register rt) {
1674 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1675 }
1676
1677
1678 void Assembler::andi(Register rt, Register rs, int32_t j) {
1679 DCHECK(is_uint16(j));
1680 GenInstrImmediate(ANDI, rs, rt, j);
1681 }
1682
1683
1684 void Assembler::or_(Register rd, Register rs, Register rt) {
1685 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1686 }
1687
1688
1689 void Assembler::ori(Register rt, Register rs, int32_t j) {
1690 DCHECK(is_uint16(j));
1691 GenInstrImmediate(ORI, rs, rt, j);
1692 }
1693
1694
1695 void Assembler::xor_(Register rd, Register rs, Register rt) {
1696 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1697 }
1698
1699
1700 void Assembler::xori(Register rt, Register rs, int32_t j) {
1701 DCHECK(is_uint16(j));
1702 GenInstrImmediate(XORI, rs, rt, j);
1703 }
1704
1705
1706 void Assembler::nor(Register rd, Register rs, Register rt) {
1707 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1708 }
1709
1710
1711 // Shifts.
1712 void Assembler::sll(Register rd,
1713 Register rt,
1714 uint16_t sa,
1715 bool coming_from_nop) {
1716 // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1717 // generated using the sll instruction. They must be generated using
1718 // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1719 // instructions.
1720 DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
1721 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
1722 }
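// For reference: the canonical MIPS nop is exactly this encoding with
// rd == rt == zero_reg and sa == 0, i.e. the all-zero instruction word
// 0x00000000, which is why only the nop()/MarkCode() helpers may emit it.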
1723
1724
1725 void Assembler::sllv(Register rd, Register rt, Register rs) {
1726 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1727 }
1728
1729
1730 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1731 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
1732 }
1733
1734
1735 void Assembler::srlv(Register rd, Register rt, Register rs) {
1736 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1737 }
1738
1739
1740 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1741 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
1742 }
1743
1744
1745 void Assembler::srav(Register rd, Register rt, Register rs) {
1746 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1747 }
1748
1749
1750 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1751 // Should be called via MacroAssembler::Ror.
1752 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1753 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1754 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1755 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1756 emit(instr);
1757 }
1758
1759
1760 void Assembler::rotrv(Register rd, Register rt, Register rs) {
1761 // Should be called via MacroAssembler::Ror.
1762 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1763 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
1764 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1765 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1766 emit(instr);
1767 }
1768
1769
1770 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
1771 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1772 DCHECK(sa <= 3);
1773 DCHECK(IsMipsArchVariant(kMips32r6));
1774 Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
1775 rd.code() << kRdShift | sa << kSaShift | LSA;
1776 emit(instr);
1777 }
1778
1779
1780 // ------------Memory-instructions-------------
1781
1782 // Helper for base-reg + offset, when offset is larger than int16.
1783 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1784 DCHECK(!src.rm().is(at));
1785 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
1786 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1787 addu(at, at, src.rm()); // Add base register.
1788 }
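// Worked example (illustrative values): for offset 0x12345678 and base s0,
// the helper above emits
//   lui  at, 0x1234        // at = 0x12340000
//   ori  at, at, 0x5678    // at = 0x12345678
//   addu at, at, s0        // at = base + offset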
1789
1790
1791 void Assembler::lb(Register rd, const MemOperand& rs) {
1792 if (is_int16(rs.offset_)) {
1793 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1794 } else { // Offset > 16 bits, use multiple instructions to load.
1795 LoadRegPlusOffsetToAt(rs);
1796 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1797 }
1798 }
1799
1800
1801 void Assembler::lbu(Register rd, const MemOperand& rs) {
1802 if (is_int16(rs.offset_)) {
1803 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1804 } else { // Offset > 16 bits, use multiple instructions to load.
1805 LoadRegPlusOffsetToAt(rs);
1806 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1807 }
1808 }
1809
1810
1811 void Assembler::lh(Register rd, const MemOperand& rs) {
1812 if (is_int16(rs.offset_)) {
1813 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1814 } else { // Offset > 16 bits, use multiple instructions to load.
1815 LoadRegPlusOffsetToAt(rs);
1816 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1817 }
1818 }
1819
1820
1821 void Assembler::lhu(Register rd, const MemOperand& rs) {
1822 if (is_int16(rs.offset_)) {
1823 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1824 } else { // Offset > 16 bits, use multiple instructions to load.
1825 LoadRegPlusOffsetToAt(rs);
1826 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1827 }
1828 }
1829
1830
1831 void Assembler::lw(Register rd, const MemOperand& rs) {
1832 if (is_int16(rs.offset_)) {
1833 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1834 } else { // Offset > 16 bits, use multiple instructions to load.
1835 LoadRegPlusOffsetToAt(rs);
1836 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1837 }
1838 }
1839
1840
1841 void Assembler::lwl(Register rd, const MemOperand& rs) {
1842 DCHECK(is_int16(rs.offset_));
1843 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1844 IsMipsArchVariant(kMips32r2));
1845 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1846 }
1847
1848
1849 void Assembler::lwr(Register rd, const MemOperand& rs) {
1850 DCHECK(is_int16(rs.offset_));
1851 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1852 IsMipsArchVariant(kMips32r2));
1853 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1854 }
1855
1856
1857 void Assembler::sb(Register rd, const MemOperand& rs) {
1858 if (is_int16(rs.offset_)) {
1859 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1860 } else { // Offset > 16 bits, use multiple instructions to store.
1861 LoadRegPlusOffsetToAt(rs);
1862 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1863 }
1864 }
1865
1866
1867 void Assembler::sh(Register rd, const MemOperand& rs) {
1868 if (is_int16(rs.offset_)) {
1869 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1870 } else { // Offset > 16 bits, use multiple instructions to store.
1871 LoadRegPlusOffsetToAt(rs);
1872 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1873 }
1874 }
1875
1876
1877 void Assembler::sw(Register rd, const MemOperand& rs) {
1878 if (is_int16(rs.offset_)) {
1879 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1880 } else { // Offset > 16 bits, use multiple instructions to store.
1881 LoadRegPlusOffsetToAt(rs);
1882 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1883 }
1884 }
1885
1886
1887 void Assembler::swl(Register rd, const MemOperand& rs) {
1888 DCHECK(is_int16(rs.offset_));
1889 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1890 IsMipsArchVariant(kMips32r2));
1891 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1892 }
1893
1894
1895 void Assembler::swr(Register rd, const MemOperand& rs) {
1896 DCHECK(is_int16(rs.offset_));
1897 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1898 IsMipsArchVariant(kMips32r2));
1899 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1900 }
1901
1902
1903 void Assembler::lui(Register rd, int32_t j) {
1904 DCHECK(is_uint16(j));
1905 GenInstrImmediate(LUI, zero_reg, rd, j);
1906 }
1907
1908
1909 void Assembler::aui(Register rt, Register rs, int32_t j) {
1910 // This instruction uses the same opcode as 'lui'. The difference in
1911 // encoding is that 'lui' has the zero register in the 'rs' field.
1912 DCHECK(!(rs.is(zero_reg)));
1913 DCHECK(is_uint16(j));
1914 GenInstrImmediate(LUI, rs, rt, j);
1915 }
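// For example, "lui t0, 0x1234" and "aui t0, t1, 0x1234" share the LUI
// opcode; only the rs field (zero_reg vs. t1) tells them apart.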
1916
1917 // ---------PC-Relative instructions-----------
1918
1919 void Assembler::addiupc(Register rs, int32_t imm19) {
1920 DCHECK(IsMipsArchVariant(kMips32r6));
1921 DCHECK(rs.is_valid() && is_int19(imm19));
1922 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
1923 GenInstrImmediate(PCREL, rs, imm21);
1924 }
1925
1926
1927 void Assembler::lwpc(Register rs, int32_t offset19) {
1928 DCHECK(IsMipsArchVariant(kMips32r6));
1929 DCHECK(rs.is_valid() && is_int19(offset19));
1930 uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
1931 GenInstrImmediate(PCREL, rs, imm21);
1932 }
1933
1934
1935 void Assembler::auipc(Register rs, int16_t imm16) {
1936 DCHECK(IsMipsArchVariant(kMips32r6));
1937 DCHECK(rs.is_valid());
1938 uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
1939 GenInstrImmediate(PCREL, rs, imm21);
1940 }
1941
1942
1943 void Assembler::aluipc(Register rs, int16_t imm16) {
1944 DCHECK(IsMipsArchVariant(kMips32r6));
1945 DCHECK(rs.is_valid());
1946 uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
1947 GenInstrImmediate(PCREL, rs, imm21);
1948 }
1949
1950
1951 // -------------Misc-instructions--------------
1952
1953 // Break / Trap instructions.
1954 void Assembler::break_(uint32_t code, bool break_as_stop) {
1955 DCHECK((code & ~0xfffff) == 0);
1956 // We need to invalidate breaks that could be stops as well because the
1957 // simulator expects a char pointer after the stop instruction.
1958 // See constants-mips.h for explanation.
1959 DCHECK((break_as_stop &&
1960 code <= kMaxStopCode &&
1961 code > kMaxWatchpointCode) ||
1962 (!break_as_stop &&
1963 (code > kMaxStopCode ||
1964 code <= kMaxWatchpointCode)));
1965 Instr break_instr = SPECIAL | BREAK | (code << 6);
1966 emit(break_instr);
1967 }
1968
1969
1970 void Assembler::stop(const char* msg, uint32_t code) {
1971 DCHECK(code > kMaxWatchpointCode);
1972 DCHECK(code <= kMaxStopCode);
1973 #if V8_HOST_ARCH_MIPS
1974 break_(0x54321);
1975 #else // V8_HOST_ARCH_MIPS
1976 BlockTrampolinePoolFor(2);
1977 // The Simulator will handle the stop instruction and get the message address.
1978 // On MIPS stop() is just a special kind of break_().
1979 break_(code, true);
1980 emit(reinterpret_cast<Instr>(msg));
1981 #endif
1982 }
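// In the simulator build the emitted sequence is therefore a stop-range
// break_ followed by one word holding the message pointer, which the
// simulator reads back when it hits the stop.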
1983
1984
1985 void Assembler::tge(Register rs, Register rt, uint16_t code) {
1986 DCHECK(is_uint10(code));
1987 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1988 | rt.code() << kRtShift | code << 6;
1989 emit(instr);
1990 }
1991
1992
1993 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
1994 DCHECK(is_uint10(code));
1995 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1996 | rt.code() << kRtShift | code << 6;
1997 emit(instr);
1998 }
1999
2000
2001 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2002 DCHECK(is_uint10(code));
2003 Instr instr =
2004 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2005 emit(instr);
2006 }
2007
2008
2009 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2010 DCHECK(is_uint10(code));
2011 Instr instr =
2012 SPECIAL | TLTU | rs.code() << kRsShift
2013 | rt.code() << kRtShift | code << 6;
2014 emit(instr);
2015 }
2016
2017
2018 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2019 DCHECK(is_uint10(code));
2020 Instr instr =
2021 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2022 emit(instr);
2023 }
2024
2025
2026 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2027 DCHECK(is_uint10(code));
2028 Instr instr =
2029 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2030 emit(instr);
2031 }
2032
2033 void Assembler::sync() {
2034 Instr sync_instr = SPECIAL | SYNC;
2035 emit(sync_instr);
2036 }
2037
2038 // Move from HI/LO register.
2039
2040 void Assembler::mfhi(Register rd) {
2041 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2042 }
2043
2044
2045 void Assembler::mflo(Register rd) {
2046 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2047 }
2048
2049
2050 // Set on less than instructions.
2051 void Assembler::slt(Register rd, Register rs, Register rt) {
2052 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2053 }
2054
2055
2056 void Assembler::sltu(Register rd, Register rs, Register rt) {
2057 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2058 }
2059
2060
2061 void Assembler::slti(Register rt, Register rs, int32_t j) {
2062 GenInstrImmediate(SLTI, rs, rt, j);
2063 }
2064
2065
2066 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2067 GenInstrImmediate(SLTIU, rs, rt, j);
2068 }
2069
2070
2071 // Conditional move.
2072 void Assembler::movz(Register rd, Register rs, Register rt) {
2073 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2074 }
2075
2076
2077 void Assembler::movn(Register rd, Register rs, Register rt) {
2078 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2079 }
2080
2081
2082 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2083 Register rt;
2084 rt.reg_code = (cc & 0x0007) << 2 | 1;
2085 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2086 }
2087
2088
2089 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2090 Register rt;
2091 rt.reg_code = (cc & 0x0007) << 2 | 0;
2092 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2093 }
2094
2095
2096 void Assembler::seleqz(Register rd, Register rs, Register rt) {
2097 DCHECK(IsMipsArchVariant(kMips32r6));
2098 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2099 }
2100
2101
2102 // Bit twiddling.
2103 void Assembler::clz(Register rd, Register rs) {
2104 if (!IsMipsArchVariant(kMips32r6)) {
2105 // The clz instruction requires the same GPR number in 'rd' and 'rt'.
2106 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2107 } else {
2108 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2109 }
2110 }
2111
2112
2113 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2114 // Should be called via MacroAssembler::Ins.
2115 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2116 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2117 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2118 }
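// Illustrative call: ins_(t0, t1, 4, 8) copies the low 8 bits of t1 into
// bits 11..4 of t0; the encoding stores msb = pos + size - 1 = 11 in the
// rd field and lsb = pos = 4 in the sa field.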
2119
2120
2121 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2122 // Should be called via MacroAssembler::Ext.
2123 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2124 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2125 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2126 }
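// Illustrative call: ext_(t0, t1, 4, 8) extracts bits 11..4 of t1 into the
// low 8 bits of t0; here the rd field holds msbd = size - 1 = 7 and the sa
// field holds lsb = pos = 4.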
2127
2128
2129 void Assembler::bitswap(Register rd, Register rt) {
2130 DCHECK(IsMipsArchVariant(kMips32r6));
2131 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2132 }
2133
2134
2135 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2136 DCHECK(!IsMipsArchVariant(kLoongson));
2137 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2138 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2139 | (rs.offset_);
2140 emit(instr);
2141 }
2142
2143
2144 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2145 DCHECK(IsMipsArchVariant(kMips32r6));
2146 DCHECK(is_uint3(bp));
2147 uint16_t sa = (ALIGN << kBp2Bits) | bp;
2148 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2149 }
2150
2151 // Byte swap.
2152 void Assembler::wsbh(Register rd, Register rt) {
2153 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2154 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2155 }
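// Example: wsbh swaps the bytes within each halfword, so 0xAABBCCDD in rt
// becomes 0xBBAADDCC in rd.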
2156
2157 void Assembler::seh(Register rd, Register rt) {
2158 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2159 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2160 }
2161
2162 void Assembler::seb(Register rd, Register rt) {
2163 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2164 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2165 }
2166
2167 // --------Coprocessor-instructions----------------
2168
2169 // Load, store, move.
2170 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2171 if (is_int16(src.offset_)) {
2172 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2173 } else { // Offset > 16 bits, use multiple instructions to load.
2174 LoadRegPlusOffsetToAt(src);
2175 GenInstrImmediate(LWC1, at, fd, 0);
2176 }
2177 }
2178
2179
2180 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2181 // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit
2182 // load into two 32-bit loads.
2183 if (IsFp32Mode()) { // fp32 mode.
2184 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2185 GenInstrImmediate(LWC1, src.rm(), fd,
2186 src.offset_ + Register::kMantissaOffset);
2187 FPURegister nextfpreg;
2188 nextfpreg.setcode(fd.code() + 1);
2189 GenInstrImmediate(LWC1, src.rm(), nextfpreg,
2190 src.offset_ + Register::kExponentOffset);
2191 } else { // Offset > 16 bits, use multiple instructions to load.
2192 LoadRegPlusOffsetToAt(src);
2193 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
2194 FPURegister nextfpreg;
2195 nextfpreg.setcode(fd.code() + 1);
2196 GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
2197 }
2198 } else {
2199 DCHECK(IsFp64Mode() || IsFpxxMode());
2200 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2201 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2202 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2203 GenInstrImmediate(LWC1, src.rm(), fd,
2204 src.offset_ + Register::kMantissaOffset);
2205 GenInstrImmediate(LW, src.rm(), at,
2206 src.offset_ + Register::kExponentOffset);
2207 mthc1(at, fd);
2208 } else { // Offset > 16 bits, use multiple instructions to load.
2209 LoadRegPlusOffsetToAt(src);
2210 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
2211 GenInstrImmediate(LW, at, at, Register::kExponentOffset);
2212 mthc1(at, fd);
2213 }
2214 }
2215 }
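// Sketch of the fp32-mode expansion above for a reachable offset, assuming a
// little-endian build (kMantissaOffset == 0, kExponentOffset == 4):
//   lwc1 fd,     offset+0(base)   // low word (mantissa)
//   lwc1 fd + 1, offset+4(base)   // high word (exponent)
// In fp64/fpxx mode the high word instead goes through 'at' via mthc1.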
2216
2217
2218 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2219 if (is_int16(src.offset_)) {
2220 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2221 } else { // Offset > 16 bits, use multiple instructions to store.
2222 LoadRegPlusOffsetToAt(src);
2223 GenInstrImmediate(SWC1, at, fd, 0);
2224 }
2225 }
2226
2227
2228 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
2229 // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit
2230 // store into two 32-bit stores.
2231 DCHECK(!src.rm().is(at));
2232 DCHECK(!src.rm().is(t8));
2233 if (IsFp32Mode()) { // fp32 mode.
2234 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2235 GenInstrImmediate(SWC1, src.rm(), fd,
2236 src.offset_ + Register::kMantissaOffset);
2237 FPURegister nextfpreg;
2238 nextfpreg.setcode(fd.code() + 1);
2239 GenInstrImmediate(SWC1, src.rm(), nextfpreg,
2240 src.offset_ + Register::kExponentOffset);
2241 } else { // Offset > 16 bits, use multiple instructions to store.
2242 LoadRegPlusOffsetToAt(src);
2243 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
2244 FPURegister nextfpreg;
2245 nextfpreg.setcode(fd.code() + 1);
2246 GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
2247 }
2248 } else {
2249 DCHECK(IsFp64Mode() || IsFpxxMode());
2250 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2251 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2252 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2253 GenInstrImmediate(SWC1, src.rm(), fd,
2254 src.offset_ + Register::kMantissaOffset);
2255 mfhc1(at, fd);
2256 GenInstrImmediate(SW, src.rm(), at,
2257 src.offset_ + Register::kExponentOffset);
2258 } else { // Offset > 16 bits, use multiple instructions to store.
2259 LoadRegPlusOffsetToAt(src);
2260 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
2261 mfhc1(t8, fd);
2262 GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
2263 }
2264 }
2265 }
2266
2267
2268 void Assembler::mtc1(Register rt, FPURegister fs) {
2269 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2270 }
2271
2272
2273 void Assembler::mthc1(Register rt, FPURegister fs) {
2274 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2275 }
2276
2277
2278 void Assembler::mfc1(Register rt, FPURegister fs) {
2279 GenInstrRegister(COP1, MFC1, rt, fs, f0);
2280 }
2281
2282
2283 void Assembler::mfhc1(Register rt, FPURegister fs) {
2284 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2285 }
2286
2287
2288 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2289 GenInstrRegister(COP1, CTC1, rt, fs);
2290 }
2291
2292
2293 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2294 GenInstrRegister(COP1, CFC1, rt, fs);
2295 }
2296
2297
2298 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2299 uint64_t i;
2300 memcpy(&i, &d, 8);
2301
2302 *lo = i & 0xffffffff;
2303 *hi = i >> 32;
2304 }
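// Example: DoubleAsTwoUInt32(1.0, &lo, &hi) yields lo == 0x00000000 and
// hi == 0x3FF00000, the two halves of the IEEE 754 bit pattern of 1.0.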
2305
2306
2307 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
2308 DCHECK(!IsMipsArchVariant(kMips32r6));
2309 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
2310 }
2311
2312
2313 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
2314 DCHECK(!IsMipsArchVariant(kMips32r6));
2315 GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
2316 }
2317
2318
2319 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2320 FPURegister ft) {
2321 DCHECK(IsMipsArchVariant(kMips32r6));
2322 DCHECK((fmt == D) || (fmt == S));
2323
2324 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2325 }
2326
2327
2328 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2329 sel(S, fd, fs, ft);
2330 }
2331
2332
2333 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2334 sel(D, fd, fs, ft);
2335 }
2336
2337
2338 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2339 FPURegister ft) {
2340 DCHECK(IsMipsArchVariant(kMips32r6));
2341 DCHECK((fmt == D) || (fmt == S));
2342 GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2343 }
2344
2345
2346 void Assembler::selnez(Register rd, Register rs, Register rt) {
2347 DCHECK(IsMipsArchVariant(kMips32r6));
2348 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2349 }
2350
2351
2352 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
2353 FPURegister ft) {
2354 DCHECK(IsMipsArchVariant(kMips32r6));
2355 DCHECK((fmt == D) || (fmt == S));
2356 GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
2357 }
2358
2359
2360 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2361 seleqz(D, fd, fs, ft);
2362 }
2363
2364
2365 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2366 seleqz(S, fd, fs, ft);
2367 }
2368
2369
2370 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2371 selnez(D, fd, fs, ft);
2372 }
2373
2374
2375 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2376 selnez(S, fd, fs, ft);
2377 }
2378
2379
2380 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
2381 DCHECK(!IsMipsArchVariant(kMips32r6));
2382 GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
2383 }
2384
2385
2386 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
2387 DCHECK(!IsMipsArchVariant(kMips32r6));
2388 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
2389 }
2390
2391
2392 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2393 DCHECK(!IsMipsArchVariant(kMips32r6));
2394 FPURegister ft;
2395 ft.reg_code = (cc & 0x0007) << 2 | 1;
2396 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2397 }
2398
2399
2400 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2401 DCHECK(!IsMipsArchVariant(kMips32r6));
2402 FPURegister ft;
2403 ft.reg_code = (cc & 0x0007) << 2 | 1;
2404 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2405 }
2406
2407
2408 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2409 DCHECK(!IsMipsArchVariant(kMips32r6));
2410 FPURegister ft;
2411 ft.reg_code = (cc & 0x0007) << 2 | 0;
2412 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2413 }
2414
2415
2416 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2417 DCHECK(!IsMipsArchVariant(kMips32r6));
2418 FPURegister ft;
2419 ft.reg_code = (cc & 0x0007) << 2 | 0;
2420 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2421 }
2422
2423
2424 // Arithmetic.
2425
2426 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2427 GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
2428 }
2429
2430
2431 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2432 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2433 }
2434
2435
2436 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2437 GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
2438 }
2439
2440
2441 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2442 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2443 }
2444
2445
2446 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2447 GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
2448 }
2449
2450
2451 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2452 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2453 }
2454
2455
2456 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2457 FPURegister ft) {
2458 DCHECK(IsMipsArchVariant(kMips32r2));
2459 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2460 }
2461
2462
2463 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2464 GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
2465 }
2466
2467
2468 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2469 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2470 }
2471
2472
2473 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2474 GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
2475 }
2476
2477
2478 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2479 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2480 }
2481
2482
2483 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2484 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2485 }
2486
2487
2488 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2489 GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2490 }
2491
2492
2493 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2494 GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
2495 }
2496
2497
2498 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2499 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2500 }
2501
2502
2503 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2504 GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
2505 }
2506
2507
2508 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2509 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2510 }
2511
2512
2513 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2514 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2515 GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2516 }
2517
2518
2519 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2520 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2521 GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2522 }
2523
2524
2525 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2526 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2527 GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2528 }
2529
2530
2531 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2532 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2533 GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2534 }
2535
2536
2537 // Conversions.
2538
2539 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2540 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2541 }
2542
2543
2544 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2545 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2546 }
2547
2548
2549 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2550 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2551 }
2552
2553
2554 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2555 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2556 }
2557
2558
2559 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2560 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2561 }
2562
2563
2564 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2565 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2566 }
2567
2568
2569 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2570 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2571 }
2572
2573
2574 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2575 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2576 }
2577
2578
2579 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2580 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2581 }
2582
2583
2584 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2585 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2586 }
2587
2588
2589 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2590
2591
2592 void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
2593 DCHECK(IsMipsArchVariant(kMips32r6));
2594 DCHECK((fmt == D) || (fmt == S));
2595 GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
2596 }
2597
2598
2599 void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2600
2601
2602 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
2603 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2604 IsFp64Mode());
2605 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
2606 }
2607
2608
2609 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
2610 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2611 IsFp64Mode());
2612 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
2613 }
2614
2615
2616 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
2617 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2618 IsFp64Mode());
2619 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
2620 }
2621
2622
2623 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
2624 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2625 IsFp64Mode());
2626 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
2627 }
2628
2629
2630 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
2631 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2632 IsFp64Mode());
2633 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
2634 }
2635
2636
2637 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
2638 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2639 IsFp64Mode());
2640 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
2641 }
2642
2643
2644 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
2645 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2646 IsFp64Mode());
2647 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
2648 }
2649
2650
2651 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
2652 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2653 IsFp64Mode());
2654 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
2655 }
2656
2657
2658 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
2659 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2660 IsFp64Mode());
2661 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
2662 }
2663
2664
2665 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
2666 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2667 IsFp64Mode());
2668 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
2669 }
2670
2671
2672 void Assembler::class_s(FPURegister fd, FPURegister fs) {
2673 DCHECK(IsMipsArchVariant(kMips32r6));
2674 GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
2675 }
2676
2677
2678 void Assembler::class_d(FPURegister fd, FPURegister fs) {
2679 DCHECK(IsMipsArchVariant(kMips32r6));
2680 GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
2681 }
2682
2683
2684 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
2685 FPURegister ft) {
2686 DCHECK(IsMipsArchVariant(kMips32r6));
2687 DCHECK((fmt == D) || (fmt == S));
2688 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2689 }
2690
2691
2692 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
2693 FPURegister ft) {
2694 DCHECK(IsMipsArchVariant(kMips32r6));
2695 DCHECK((fmt == D) || (fmt == S));
2696 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
2697 }
2698
2699
2700 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
2701 FPURegister ft) {
2702 DCHECK(IsMipsArchVariant(kMips32r6));
2703 DCHECK((fmt == D) || (fmt == S));
2704 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2705 }
2706
2707
2708 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
2709 FPURegister ft) {
2710 DCHECK(IsMipsArchVariant(kMips32r6));
2711 DCHECK((fmt == D) || (fmt == S));
2712 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
2713 }
2714
2715
2716 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2717 min(S, fd, fs, ft);
2718 }
2719
2720
2721 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2722 min(D, fd, fs, ft);
2723 }
2724
2725
2726 void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2727 max(S, fd, fs, ft);
2728 }
2729
2730
2731 void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2732 max(D, fd, fs, ft);
2733 }
2734
2735
2736 void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2737 mina(S, fd, fs, ft);
2738 }
2739
2740
2741 void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2742 mina(D, fd, fs, ft);
2743 }
2744
2745
2746 void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2747 maxa(S, fd, fs, ft);
2748 }
2749
2750
2751 void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2752 maxa(D, fd, fs, ft);
2753 }
2754
2755
2756 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
2757 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
2758 }
2759
2760
2761 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
2762 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2763 IsFp64Mode());
2764 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
2765 }
2766
2767
2768 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
2769 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
2770 }
2771
2772
2773 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
2774 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
2775 }
2776
2777
2778 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
2779 DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2780 IsFp64Mode());
2781 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
2782 }
2783
2784
2785 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
2786 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
2787 }
2788
2789
2790 // Conditions for >= MIPSr6.
2791 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
2792 FPURegister fd, FPURegister fs, FPURegister ft) {
2793 DCHECK(IsMipsArchVariant(kMips32r6));
2794 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2795 Instr instr = COP1 | fmt | ft.code() << kFtShift |
2796 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
2797 emit(instr);
2798 }
2799
2800
2801 void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
2802 FPURegister ft) {
2803 cmp(cond, W, fd, fs, ft);
2804 }
2805
2806 void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
2807 FPURegister ft) {
2808 cmp(cond, L, fd, fs, ft);
2809 }
2810
2811
2812 void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
2813 DCHECK(IsMipsArchVariant(kMips32r6));
2814 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
2815 emit(instr);
2816 }
2817
2818
2819 void Assembler::bc1nez(int16_t offset, FPURegister ft) {
2820 DCHECK(IsMipsArchVariant(kMips32r6));
2821 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
2822 emit(instr);
2823 }
2824
2825
2826 // Conditions for < MIPSr6.
2827 void Assembler::c(FPUCondition cond, SecondaryField fmt,
2828 FPURegister fs, FPURegister ft, uint16_t cc) {
2829 DCHECK(is_uint3(cc));
2830 DCHECK(fmt == S || fmt == D);
2831 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2832 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
2833 | cc << 8 | 3 << 4 | cond;
2834 emit(instr);
2835 }
2836
2837
2838 void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
2839 uint16_t cc) {
2840 c(cond, S, fs, ft, cc);
2841 }
2842
2843
2844 void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
2845 uint16_t cc) {
2846 c(cond, D, fs, ft, cc);
2847 }
2848
2849
2850 void Assembler::fcmp(FPURegister src1, const double src2,
2851 FPUCondition cond) {
2852 DCHECK(src2 == 0.0);
2853 mtc1(zero_reg, f14);
2854 cvt_d_w(f14, f14);
2855 c(cond, D, src1, f14, 0);
2856 }
2857
2858
2859 void Assembler::bc1f(int16_t offset, uint16_t cc) {
2860 DCHECK(is_uint3(cc));
2861 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
2862 emit(instr);
2863 }
2864
2865
2866 void Assembler::bc1t(int16_t offset, uint16_t cc) {
2867 DCHECK(is_uint3(cc));
2868 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
2869 emit(instr);
2870 }
2871
2872
2873 int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
2874 intptr_t pc_delta) {
2875 Instr instr = instr_at(pc);
2876
2877 if (RelocInfo::IsInternalReference(rmode)) {
2878 int32_t* p = reinterpret_cast<int32_t*>(pc);
2879 if (*p == 0) {
2880 return 0; // Number of instructions patched.
2881 }
2882 *p += pc_delta;
2883 return 1; // Number of instructions patched.
2884 } else {
2885 DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
2886 if (IsLui(instr)) {
2887 Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
2888 Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
2889 DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
2890 int32_t imm;
2891 if (IsJicOrJialc(instr2)) {
2892 imm = CreateTargetAddress(instr1, instr2);
2893 } else {
2894 imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
2895 imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
2896 }
2897
2898 if (imm == kEndOfJumpChain) {
2899 return 0; // Number of instructions patched.
2900 }
2901 imm += pc_delta;
2902 DCHECK((imm & 3) == 0);
2903 instr1 &= ~kImm16Mask;
2904 instr2 &= ~kImm16Mask;
2905
2906 if (IsJicOrJialc(instr2)) {
2907 uint32_t lui_offset_u, jic_offset_u;
2908 Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
2909 instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
2910 instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
2911 } else {
2912 instr_at_put(pc + 0 * Assembler::kInstrSize,
2913 instr1 | ((imm >> kLuiShift) & kImm16Mask));
2914 instr_at_put(pc + 1 * Assembler::kInstrSize,
2915 instr2 | (imm & kImm16Mask));
2916 }
2917 return 2; // Number of instructions patched.
2918 } else {
2919 UNREACHABLE();
2920 return 0;
2921 }
2922 }
2923 }
2924
2925
2926 void Assembler::GrowBuffer() {
2927 if (!own_buffer_) FATAL("external code buffer is too small");
2928
2929 // Compute new buffer size.
2930 CodeDesc desc; // The new buffer.
2931 if (buffer_size_ < 1 * MB) {
2932 desc.buffer_size = 2 * buffer_size_;
2933 } else {
2934 desc.buffer_size = buffer_size_ + 1 * MB;
2935 }
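// For instance, a 512 KB buffer doubles to 1 MB, while a 4 MB buffer only
// grows by another 1 MB to 5 MB.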
2936 CHECK_GT(desc.buffer_size, 0); // No overflow.
2937
2938 // Set up new buffer.
2939 desc.buffer = NewArray<byte>(desc.buffer_size);
2940 desc.origin = this;
2941
2942 desc.instr_size = pc_offset();
2943 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2944
2945 // Copy the data.
2946 int pc_delta = desc.buffer - buffer_;
2947 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2948 MemMove(desc.buffer, buffer_, desc.instr_size);
2949 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
2950 desc.reloc_size);
2951
2952 // Switch buffers.
2953 DeleteArray(buffer_);
2954 buffer_ = desc.buffer;
2955 buffer_size_ = desc.buffer_size;
2956 pc_ += pc_delta;
2957 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2958 reloc_info_writer.last_pc() + pc_delta);
2959
2960 // Relocate runtime entries.
2961 for (RelocIterator it(desc); !it.done(); it.next()) {
2962 RelocInfo::Mode rmode = it.rinfo()->rmode();
2963 if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
2964 rmode == RelocInfo::INTERNAL_REFERENCE) {
2965 byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
2966 RelocateInternalReference(rmode, p, pc_delta);
2967 }
2968 }
2969 DCHECK(!overflow());
2970 }
2971
2972
2973 void Assembler::db(uint8_t data) {
2974 CheckForEmitInForbiddenSlot();
2975 EmitHelper(data);
2976 }
2977
2978
2979 void Assembler::dd(uint32_t data) {
2980 CheckForEmitInForbiddenSlot();
2981 EmitHelper(data);
2982 }
2983
2984
2985 void Assembler::dq(uint64_t data) {
2986 CheckForEmitInForbiddenSlot();
2987 EmitHelper(data);
2988 }
2989
2990
2991 void Assembler::dd(Label* label) {
2992 uint32_t data;
2993 CheckForEmitInForbiddenSlot();
2994 if (label->is_bound()) {
2995 data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
2996 } else {
2997 data = jump_address(label);
2998 unbound_labels_count_++;
2999 internal_reference_positions_.insert(label->pos());
3000 }
3001 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3002 EmitHelper(data);
3003 }
3004
3005
3006 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3007 // We do not try to reuse pool constants.
3008 RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
3009 if (rmode >= RelocInfo::COMMENT &&
3010 rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
3011 // Adjust code for new modes.
3012 DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
3013 || RelocInfo::IsComment(rmode)
3014 || RelocInfo::IsPosition(rmode));
3015 // These modes do not need an entry in the constant pool.
3016 }
3017 if (!RelocInfo::IsNone(rinfo.rmode())) {
3018 // Don't record external references unless the heap will be serialized.
3019 if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
3020 !serializer_enabled() && !emit_debug_code()) {
3021 return;
3022 }
3023 DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
3024 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
3025 RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
3026 RecordedAstId().ToInt(), NULL);
3027 ClearRecordedAstId();
3028 reloc_info_writer.Write(&reloc_info_with_ast_id);
3029 } else {
3030 reloc_info_writer.Write(&rinfo);
3031 }
3032 }
3033 }
3034
3035
3036 void Assembler::BlockTrampolinePoolFor(int instructions) {
3037 CheckTrampolinePoolQuick(instructions);
3038 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
3039 }
3040
3041
3042 void Assembler::CheckTrampolinePool() {
3043 // Some small sequences of instructions must not be broken up by the
3044 // insertion of a trampoline pool; such sequences are protected by setting
3045 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
3046 // which are both checked here. Also, recursive calls to CheckTrampolinePool
3047 // are blocked by trampoline_pool_blocked_nesting_.
3048 if ((trampoline_pool_blocked_nesting_ > 0) ||
3049 (pc_offset() < no_trampoline_pool_before_)) {
3050 // Emission is currently blocked; make sure we try again as soon as
3051 // possible.
3052 if (trampoline_pool_blocked_nesting_ > 0) {
3053 next_buffer_check_ = pc_offset() + kInstrSize;
3054 } else {
3055 next_buffer_check_ = no_trampoline_pool_before_;
3056 }
3057 return;
3058 }
3059
3060 DCHECK(!trampoline_emitted_);
3061 DCHECK(unbound_labels_count_ >= 0);
3062 if (unbound_labels_count_ > 0) {
3063 // First we emit jump (2 instructions), then we emit trampoline pool.
3064 { BlockTrampolinePoolScope block_trampoline_pool(this);
3065 Label after_pool;
3066 if (IsMipsArchVariant(kMips32r6)) {
3067 bc(&after_pool);
3068 } else {
3069 b(&after_pool);
3070 nop();
3071 }
3072
3073 int pool_start = pc_offset();
3074 if (IsMipsArchVariant(kMips32r6)) {
3075 for (int i = 0; i < unbound_labels_count_; i++) {
3076 uint32_t imm32;
3077 imm32 = jump_address(&after_pool);
3078 uint32_t lui_offset, jic_offset;
3079 UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
3080 {
3081 BlockGrowBufferScope block_buf_growth(this);
3082 // Buffer growth (and relocation) must be blocked for internal
3083 // references until associated instructions are emitted and
3084 // available to be patched.
3085 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3086 lui(at, lui_offset);
3087 jic(at, jic_offset);
3088 }
3089 CheckBuffer();
3090 }
3091 } else {
3092 for (int i = 0; i < unbound_labels_count_; i++) {
3093 uint32_t imm32;
3094 imm32 = jump_address(&after_pool);
3095 {
3096 BlockGrowBufferScope block_buf_growth(this);
3097 // Buffer growth (and relocation) must be blocked for internal
3098 // references until associated instructions are emitted and
3099 // available to be patched.
3100 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3101 lui(at, (imm32 & kHiMask) >> kLuiShift);
3102 ori(at, at, (imm32 & kImm16Mask));
3103 }
3104 CheckBuffer();
3105 jr(at);
3106 nop();
3107 }
3108 }
3109 bind(&after_pool);
3110 trampoline_ = Trampoline(pool_start, unbound_labels_count_);
3111
3112 trampoline_emitted_ = true;
3113 // As we are only going to emit the trampoline once, we need to prevent
3114 // any further emission.
3115 next_buffer_check_ = kMaxInt;
3116 }
3117 } else {
3118 // The number of branches to unbound labels at this point is zero, so we
3119 // can move the next buffer check to the maximum.
3120 next_buffer_check_ = pc_offset() +
3121 kMaxBranchOffset - kTrampolineSlotsSize * 16;
3122 }
3123 return;
3124 }
3125
3126
3127 Address Assembler::target_address_at(Address pc) {
3128 Instr instr1 = instr_at(pc);
3129 Instr instr2 = instr_at(pc + kInstrSize);
3130 // Interpret 2 instructions generated by li: lui/ori
3131 if (IsLui(instr1) && IsOri(instr2)) {
3132 // Assemble the 32 bit value.
3133 return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
3134 GetImmediate16(instr2));
3135 }
3136
3137 // We should never get here; force a bad address if we do.
3138 UNREACHABLE();
3139 return (Address)0x0;
3140 }
3141
3142
3143 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
3144 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
3145 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
3146 // OS::nan_value() returns a qNaN.
3147 void Assembler::QuietNaN(HeapObject* object) {
3148 HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
3149 }
3150
3151
3152 // On Mips, a target address is stored in a lui/ori instruction pair, each
3153 // of which loads 16 bits of the 32-bit address into a register.
3154 // Patching the address must replace both instructions and flush the i-cache.
3155 // On r6, the target address is stored in a lui/jic pair, and both
3156 // instructions have to be patched.
3157 //
3158 // There is an optimization below, which emits a nop when the address
3159 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
3160 // and possibly removed.
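// For example (illustrative target), patching in 0xAABBCCDD rewrites the
// pair to
//   lui rt, 0xAABB
//   ori rt, rt, 0xCCDD
// or to the equivalent lui/jic pair on r6.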
3161 void Assembler::set_target_address_at(Isolate* isolate, Address pc,
3162 Address target,
3163 ICacheFlushMode icache_flush_mode) {
3164 Instr instr2 = instr_at(pc + kInstrSize);
3165 uint32_t rt_code = GetRtField(instr2);
3166 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
3167 uint32_t itarget = reinterpret_cast<uint32_t>(target);
3168
3169 #ifdef DEBUG
3170 // Check we have the result from a li macro-instruction, using instr pair.
3171 Instr instr1 = instr_at(pc);
3172 CHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
3173 #endif
3174
3175 if (IsJicOrJialc(instr2)) {
3176 // Must use 2 instructions to ensure patchable code => use lui and jic.
3177 uint32_t lui_offset, jic_offset;
3178 Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);
3179
3180 *p &= ~kImm16Mask;
3181 *(p + 1) &= ~kImm16Mask;
3182
3183 *p |= lui_offset;
3184 *(p + 1) |= jic_offset;
3185
3186 } else {
3187 // Must use 2 instructions to ensure patchable code => just use lui and ori.
3188 // lui rt, upper-16.
3189 // ori rt rt, lower-16.
3190 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
3191 *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
3192 }
3193
3194 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
3195 Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
3196 }
3197 }
3198
3199 } // namespace internal
3200 } // namespace v8
3201
3202 #endif // V8_TARGET_ARCH_MIPS
3203