• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "assembler_mips64.h"
18 
19 #include "base/bit_utils.h"
20 #include "base/casts.h"
21 #include "entrypoints/quick/quick_entrypoints.h"
22 #include "entrypoints/quick/quick_entrypoints_enum.h"
23 #include "memory_region.h"
24 #include "thread.h"
25 
26 namespace art {
27 namespace mips64 {
28 
// Finalizes the instruction stream before layout: emits the deferred
// exception-poll sequences recorded during assembly, then resolves all
// recorded branches to their final (promoted) forms.
void Mips64Assembler::FinalizeCode() {
  for (auto& exception_block : exception_blocks_) {
    EmitExceptionPoll(&exception_block);
  }
  PromoteBranches();
}
35 
// Copies the finished code into `region`. Branch placeholders are
// overwritten with their final encodings first, then the base class copies
// the buffer out, and finally the delayed CFI advance-PC opcodes are patched
// with the adjusted (post-promotion) PC offsets.
void Mips64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  EmitBranches();
  Assembler::FinalizeInstructions(region);
  PatchCFI();
}
41 
// Rewrites the CFI opcode stream, replacing each delayed advance-PC entry
// with an advance computed from the instruction's final position (which may
// have shifted when branches were promoted to longer sequences).
void Mips64Assembler::PatchCFI() {
  if (cfi().NumberOfDelayedAdvancePCs() == 0u) {
    // No delayed advances were recorded; the stream is already correct.
    return;
  }

  typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
  // Take ownership of the current stream; `advances` lists where advance-PC
  // commands must be spliced in, in increasing stream position.
  const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
  const std::vector<uint8_t>& old_stream = data.first;
  const std::vector<DelayedAdvancePC>& advances = data.second;

  // Refill our data buffer with patched opcodes.
  // The `+ 16` is slack for the extra bytes the inserted advances may need.
  cfi().ReserveCFIStream(old_stream.size() + advances.size() + 16);
  size_t stream_pos = 0;
  for (const DelayedAdvancePC& advance : advances) {
    DCHECK_GE(advance.stream_pos, stream_pos);
    // Copy old data up to the point where advance was issued.
    cfi().AppendRawData(old_stream, stream_pos, advance.stream_pos);
    stream_pos = advance.stream_pos;
    // Insert the advance command with its final offset.
    size_t final_pc = GetAdjustedPosition(advance.pc);
    cfi().AdvancePC(final_pc);
  }
  // Copy the final segment if any.
  cfi().AppendRawData(old_stream, stream_pos, old_stream.size());
}
67 
// Emits the final encoding of every recorded branch into the placeholder
// slots reserved for them during assembly.
void Mips64Assembler::EmitBranches() {
  CHECK(!overwriting_);
  // Switch from appending instructions at the end of the buffer to overwriting
  // existing instructions (branch placeholders) in the buffer.
  overwriting_ = true;
  for (auto& branch : branches_) {
    EmitBranch(&branch);
  }
  overwriting_ = false;
}
78 
Emit(uint32_t value)79 void Mips64Assembler::Emit(uint32_t value) {
80   if (overwriting_) {
81     // Branches to labels are emitted into their placeholders here.
82     buffer_.Store<uint32_t>(overwrite_location_, value);
83     overwrite_location_ += sizeof(uint32_t);
84   } else {
85     // Other instructions are simply appended at the end here.
86     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
87     buffer_.Emit<uint32_t>(value);
88   }
89 }
90 
// Assembles an R-type instruction word:
// opcode | rs | rt | rd | shamt | funct, using the k*Shift field positions.
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
                            int shamt, int funct) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      static_cast<uint32_t>(rt) << kRtShift |
                      static_cast<uint32_t>(rd) << kRdShift |
                      shamt << kShamtShift |
                      funct;
  Emit(encoding);
}
104 
// Assembles an R-type instruction whose rt field is hardwired to ZERO
// (used by instructions that only take rs and rd, e.g. CLZ/CLO).
void Mips64Assembler::EmitRsd(int opcode, GpuRegister rs, GpuRegister rd,
                              int shamt, int funct) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      static_cast<uint32_t>(ZERO) << kRtShift |
                      static_cast<uint32_t>(rd) << kRdShift |
                      shamt << kShamtShift |
                      funct;
  Emit(encoding);
}
117 
// Assembles an R-type instruction whose rs field is hardwired to ZERO
// (used by instructions that only take rt and rd, e.g. BITSWAP/WSBH).
void Mips64Assembler::EmitRtd(int opcode, GpuRegister rt, GpuRegister rd,
                              int shamt, int funct) {
  CHECK_NE(rt, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(ZERO) << kRsShift |
                      static_cast<uint32_t>(rt) << kRtShift |
                      static_cast<uint32_t>(rd) << kRdShift |
                      shamt << kShamtShift |
                      funct;
  Emit(encoding);
}
130 
// Assembles an I-type instruction word: opcode | rs | rt | imm16.
void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      static_cast<uint32_t>(rt) << kRtShift |
                      imm;
  Emit(encoding);
}
140 
// Assembles an instruction with a 21-bit immediate: opcode | rs | imm21
// (used by compact branches such as BEQZC/BNEZC).
void Mips64Assembler::EmitI21(int opcode, GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK(IsUint<21>(imm21)) << imm21;
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      imm21;
  Emit(encoding);
}
149 
// Assembles an instruction with a 26-bit immediate: opcode | imm26
// (used by the BC compact branch).
void Mips64Assembler::EmitI26(int opcode, uint32_t imm26) {
  CHECK(IsUint<26>(imm26)) << imm26;
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
  Emit(encoding);
}
155 
// Assembles an FPU R-type instruction word:
// opcode | fmt | ft | fs | fd | funct.
void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
                             int funct) {
  CHECK_NE(ft, kNoFpuRegister);
  CHECK_NE(fs, kNoFpuRegister);
  CHECK_NE(fd, kNoFpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      fmt << kFmtShift |
                      static_cast<uint32_t>(ft) << kFtShift |
                      static_cast<uint32_t>(fs) << kFsShift |
                      static_cast<uint32_t>(fd) << kFdShift |
                      funct;
  Emit(encoding);
}
169 
// Assembles an FPU I-type instruction word: opcode | fmt | ft | imm16
// (used by FPU branches such as BC1EQZ/BC1NEZ).
void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister ft, uint16_t imm) {
  CHECK_NE(ft, kNoFpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      fmt << kFmtShift |
                      static_cast<uint32_t>(ft) << kFtShift |
                      imm;
  Emit(encoding);
}
178 
// ---------------------------------------------------------------------------
// Integer arithmetic. 32-bit ops sign-extend their result; the D-prefixed
// variants are the 64-bit (doubleword) forms. The R6-style MUL/MUH/DIV/MOD
// family encodes lo/hi and div/mod selection in the shamt field (2 or 3).
// ---------------------------------------------------------------------------

// ADDU: 32-bit add (SPECIAL, funct 0x21).
void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x21);
}

// ADDIU: 32-bit add immediate (opcode 0x9).
void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x9, rs, rt, imm16);
}

// DADDU: 64-bit add (SPECIAL, funct 0x2d).
void Mips64Assembler::Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2d);
}

// DADDIU: 64-bit add immediate (opcode 0x19).
void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x19, rs, rt, imm16);
}

// SUBU: 32-bit subtract (SPECIAL, funct 0x23).
void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x23);
}

// DSUBU: 64-bit subtract (SPECIAL, funct 0x2f).
void Mips64Assembler::Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2f);
}

// MUL: 32-bit multiply, low word (funct 0x18, shamt 2).
void Mips64Assembler::MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x18);
}

// MUH: 32-bit multiply, high word (funct 0x18, shamt 3).
void Mips64Assembler::MuhR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x18);
}

// DIV: 32-bit signed divide, quotient (funct 0x1a, shamt 2).
void Mips64Assembler::DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1a);
}

// MOD: 32-bit signed divide, remainder (funct 0x1a, shamt 3).
void Mips64Assembler::ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1a);
}

// DIVU: 32-bit unsigned divide, quotient (funct 0x1b, shamt 2).
void Mips64Assembler::DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1b);
}

// MODU: 32-bit unsigned divide, remainder (funct 0x1b, shamt 3).
void Mips64Assembler::ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1b);
}

// DMUL: 64-bit multiply, low doubleword (funct 0x1c, shamt 2).
void Mips64Assembler::Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1c);
}

// DMUH: 64-bit multiply, high doubleword (funct 0x1c, shamt 3).
void Mips64Assembler::Dmuh(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1c);
}

// DDIV: 64-bit signed divide, quotient (funct 0x1e, shamt 2).
void Mips64Assembler::Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1e);
}

// DMOD: 64-bit signed divide, remainder (funct 0x1e, shamt 3).
void Mips64Assembler::Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1e);
}

// DDIVU: 64-bit unsigned divide, quotient (funct 0x1f, shamt 2).
void Mips64Assembler::Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1f);
}

// DMODU: 64-bit unsigned divide, remainder (funct 0x1f, shamt 3).
void Mips64Assembler::Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1f);
}
250 
// ---------------------------------------------------------------------------
// Bitwise logical operations (register forms are SPECIAL-encoded; the
// immediate forms zero-extend their 16-bit immediate).
// ---------------------------------------------------------------------------

// AND (SPECIAL, funct 0x24).
void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x24);
}

// ANDI (opcode 0xc).
void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xc, rs, rt, imm16);
}

// OR (SPECIAL, funct 0x25).
void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x25);
}

// ORI (opcode 0xd).
void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xd, rs, rt, imm16);
}

// XOR (SPECIAL, funct 0x26).
void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x26);
}

// XORI (opcode 0xe).
void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xe, rs, rt, imm16);
}

// NOR (SPECIAL, funct 0x27).
void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x27);
}
278 
// ---------------------------------------------------------------------------
// Bit/byte manipulation (SPECIAL3 opcode 0x1f family): bit reversal, byte
// swaps, sign extension, and bit-field extract/insert.
// ---------------------------------------------------------------------------

// BITSWAP: reverse bits in each byte, 32-bit (funct 0x20).
void Mips64Assembler::Bitswap(GpuRegister rd, GpuRegister rt) {
  EmitRtd(0x1f, rt, rd, 0x0, 0x20);
}

// DBITSWAP: reverse bits in each byte, 64-bit (funct 0x24).
void Mips64Assembler::Dbitswap(GpuRegister rd, GpuRegister rt) {
  EmitRtd(0x1f, rt, rd, 0x0, 0x24);
}

// SEB: sign-extend byte (shamt 0x10, funct 0x20).
void Mips64Assembler::Seb(GpuRegister rd, GpuRegister rt) {
  EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x10, 0x20);
}

// SEH: sign-extend halfword (shamt 0x18, funct 0x20).
void Mips64Assembler::Seh(GpuRegister rd, GpuRegister rt) {
  EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x18, 0x20);
}

// DSBH: swap bytes within halfwords, 64-bit (shamt 0x2, funct 0x24).
void Mips64Assembler::Dsbh(GpuRegister rd, GpuRegister rt) {
  EmitRtd(0x1f, rt, rd, 0x2, 0x24);
}

// DSHD: swap halfwords within doubleword (shamt 0x5, funct 0x24).
void Mips64Assembler::Dshd(GpuRegister rd, GpuRegister rt) {
  EmitRtd(0x1f, rt, rd, 0x5, 0x24);
}

// DEXT: extract `size` bits starting at bit `pos` (pos < 32, size <= 32).
// The field size is encoded as size-1 in the rd field.
void Mips64Assembler::Dext(GpuRegister rt, GpuRegister rs, int pos, int size) {
  CHECK(IsUint<5>(pos)) << pos;
  CHECK(IsUint<5>(size - 1)) << size;
  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(size - 1), pos, 0x3);
}

// DINSU: insert `size` bits at bit `pos`, where pos is in [32, 63].
// Encoded with pos-32 in shamt and pos+size-33 (msb-32) in the rd field.
void Mips64Assembler::Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size) {
  CHECK(IsUint<5>(pos - 32)) << pos;
  CHECK(IsUint<5>(size - 1)) << size;
  CHECK(IsUint<5>(pos + size - 33)) << pos << " + " << size;
  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 33), pos - 32, 0x6);
}

// WSBH: swap bytes within halfwords, 32-bit (shamt 2, funct 0x20).
void Mips64Assembler::Wsbh(GpuRegister rd, GpuRegister rt) {
  EmitRtd(0x1f, rt, rd, 2, 0x20);
}
319 
// ---------------------------------------------------------------------------
// Load-linked / store-conditional (SPECIAL3 opcode 0x1f). The signed 9-bit
// offset is packed into bits 15..7 of the immediate field, with the function
// code in the low bits.
// ---------------------------------------------------------------------------

// SC: store conditional word.
void Mips64Assembler::Sc(GpuRegister rt, GpuRegister base, int16_t imm9) {
  CHECK(IsInt<9>(imm9));
  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x26);
}

// SCD: store conditional doubleword.
void Mips64Assembler::Scd(GpuRegister rt, GpuRegister base, int16_t imm9) {
  CHECK(IsInt<9>(imm9));
  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x27);
}

// LL: load linked word.
void Mips64Assembler::Ll(GpuRegister rt, GpuRegister base, int16_t imm9) {
  CHECK(IsInt<9>(imm9));
  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x36);
}

// LLD: load linked doubleword.
void Mips64Assembler::Lld(GpuRegister rt, GpuRegister base, int16_t imm9) {
  CHECK(IsInt<9>(imm9));
  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x37);
}
339 
// ---------------------------------------------------------------------------
// Shifts and rotates. Immediate forms put the 5-bit shift amount in shamt;
// variable forms take the amount from rs. Rotates reuse the shift encodings
// with a 1 in the otherwise-zero rs (immediate) or shamt (variable) field.
// D-prefixed forms are 64-bit; the *32 forms shift by shamt+32.
// ---------------------------------------------------------------------------

// SLL: shift left logical, 32-bit (funct 0x00).
void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x00);
}

// SRL: shift right logical, 32-bit (funct 0x02).
void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x02);
}

// ROTR: rotate right, 32-bit (SRL encoding with rs field = 1).
void Mips64Assembler::Rotr(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x02);
}

// SRA: shift right arithmetic, 32-bit (funct 0x03).
void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x03);
}

// SLLV: shift left logical variable, 32-bit (funct 0x04).
void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x04);
}

// ROTRV: rotate right variable, 32-bit (SRLV encoding with shamt = 1).
void Mips64Assembler::Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 1, 0x06);
}

// SRLV: shift right logical variable, 32-bit (funct 0x06).
void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x06);
}

// SRAV: shift right arithmetic variable, 32-bit (funct 0x07).
void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x07);
}

// DSLL: shift left logical, 64-bit (funct 0x38).
void Mips64Assembler::Dsll(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x38);
}

// DSRL: shift right logical, 64-bit (funct 0x3a).
void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
}

// DROTR: rotate right, 64-bit (DSRL encoding with rs field = 1).
void Mips64Assembler::Drotr(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3a);
}

// DSRA: shift right arithmetic, 64-bit (funct 0x3b).
void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
}

// DSLL32: 64-bit shift left by shamt+32 (funct 0x3c).
void Mips64Assembler::Dsll32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3c);
}

// DSRL32: 64-bit shift right logical by shamt+32 (funct 0x3e).
void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
}

// DROTR32: 64-bit rotate right by shamt+32 (DSRL32 encoding, rs = 1).
void Mips64Assembler::Drotr32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3e);
}

// DSRA32: 64-bit shift right arithmetic by shamt+32 (funct 0x3f).
void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
}

// DSLLV: shift left logical variable, 64-bit (funct 0x14).
void Mips64Assembler::Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x14);
}

// DSRLV: shift right logical variable, 64-bit (funct 0x16).
void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x16);
}

// DROTRV: rotate right variable, 64-bit (DSRLV encoding with shamt = 1).
void Mips64Assembler::Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 1, 0x16);
}

// DSRAV: shift right arithmetic variable, 64-bit (funct 0x17).
void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x17);
}
419 
// ---------------------------------------------------------------------------
// Loads and upper-immediate instructions. Loads use base register rs plus a
// 16-bit offset; unsigned variants (Lbu/Lhu/Lwu) zero-extend.
// ---------------------------------------------------------------------------

// LB: load byte, sign-extended (opcode 0x20).
void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x20, rs, rt, imm16);
}

// LH: load halfword, sign-extended (opcode 0x21).
void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x21, rs, rt, imm16);
}

// LW: load word, sign-extended (opcode 0x23).
void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x23, rs, rt, imm16);
}

// LD: load doubleword (opcode 0x37).
void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x37, rs, rt, imm16);
}

// LBU: load byte, zero-extended (opcode 0x24).
void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x24, rs, rt, imm16);
}

// LHU: load halfword, zero-extended (opcode 0x25).
void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x25, rs, rt, imm16);
}

// LWU: load word, zero-extended (opcode 0x27).
void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x27, rs, rt, imm16);
}

// LUI: load upper immediate (opcode 0xf).
void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
  EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}

// DAHI: add immediate to bits 47..32 (REGIMM opcode 1, rt field 6).
void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
  EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
}

// DATI: add immediate to bits 63..48 (REGIMM opcode 1, rt field 0x1e).
void Mips64Assembler::Dati(GpuRegister rs, uint16_t imm16) {
  EmitI(1, rs, static_cast<GpuRegister>(0x1e), imm16);
}
459 
// SYNC: memory barrier (SPECIAL, funct 0xf). The 5-bit `stype` selects the
// barrier variant and is placed in the shamt field.
void Mips64Assembler::Sync(uint32_t stype) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
           static_cast<GpuRegister>(0), stype & 0x1f, 0xf);
}
464 
// ---------------------------------------------------------------------------
// Stores: rt is stored to memory at base rs plus a 16-bit offset.
// ---------------------------------------------------------------------------

// SB: store byte (opcode 0x28).
void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x28, rs, rt, imm16);
}

// SH: store halfword (opcode 0x29).
void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x29, rs, rt, imm16);
}

// SW: store word (opcode 0x2b).
void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x2b, rs, rt, imm16);
}

// SD: store doubleword (opcode 0x3f).
void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3f, rs, rt, imm16);
}
480 
// ---------------------------------------------------------------------------
// Set-on-less-than comparisons and conditional selects.
// ---------------------------------------------------------------------------

// SLT: rd = (rs < rt), signed (SPECIAL, funct 0x2a).
void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2a);
}

// SLTU: rd = (rs < rt), unsigned (SPECIAL, funct 0x2b).
void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2b);
}

// SLTI: rt = (rs < imm16), signed (opcode 0xa).
void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xa, rs, rt, imm16);
}

// SLTIU: rt = (rs < imm16), unsigned compare (opcode 0xb).
void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xb, rs, rt, imm16);
}

// SELEQZ: rd = (rt == 0) ? rs : 0 (SPECIAL, funct 0x35).
void Mips64Assembler::Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x35);
}

// SELNEZ: rd = (rt != 0) ? rs : 0 (SPECIAL, funct 0x37).
void Mips64Assembler::Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x37);
}
504 
// ---------------------------------------------------------------------------
// Count leading zeros/ones, 32- and 64-bit (SPECIAL, shamt 0x01,
// funct 0x10-0x13). rt is hardwired to ZERO via EmitRsd.
// ---------------------------------------------------------------------------

// CLZ: count leading zeros, 32-bit.
void Mips64Assembler::Clz(GpuRegister rd, GpuRegister rs) {
  EmitRsd(0, rs, rd, 0x01, 0x10);
}

// CLO: count leading ones, 32-bit.
void Mips64Assembler::Clo(GpuRegister rd, GpuRegister rs) {
  EmitRsd(0, rs, rd, 0x01, 0x11);
}

// DCLZ: count leading zeros, 64-bit.
void Mips64Assembler::Dclz(GpuRegister rd, GpuRegister rs) {
  EmitRsd(0, rs, rd, 0x01, 0x12);
}

// DCLO: count leading ones, 64-bit.
void Mips64Assembler::Dclo(GpuRegister rd, GpuRegister rs) {
  EmitRsd(0, rs, rd, 0x01, 0x13);
}
520 
// ---------------------------------------------------------------------------
// Register jumps and PC-relative address computation.
// ---------------------------------------------------------------------------

// JALR: jump to rs, saving the return address in rd (SPECIAL, funct 0x09).
void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
}

// JALR rs: conventional call — return address goes to RA.
void Mips64Assembler::Jalr(GpuRegister rs) {
  Jalr(RA, rs);
}

// JR rs: plain jump, expressed as JALR with the link discarded into ZERO.
void Mips64Assembler::Jr(GpuRegister rs) {
  Jalr(ZERO, rs);
}

// AUIPC: rs = PC + (imm16 << 16) (PCREL opcode 0x3B, rt field 0x1E).
void Mips64Assembler::Auipc(GpuRegister rs, uint16_t imm16) {
  EmitI(0x3B, rs, static_cast<GpuRegister>(0x1E), imm16);
}

// ADDIUPC: rs = PC + (imm19 << 2) (PCREL opcode 0x3B, 19-bit immediate).
void Mips64Assembler::Addiupc(GpuRegister rs, uint32_t imm19) {
  CHECK(IsUint<19>(imm19)) << imm19;
  EmitI21(0x3B, rs, imm19);
}
541 
// BC: unconditional compact branch, 26-bit offset (opcode 0x32).
void Mips64Assembler::Bc(uint32_t imm26) {
  EmitI26(0x32, imm26);
}

// JIC: jump indexed compact — jump to rt + sign-extended imm16 (opcode 0x36).
void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
  EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
}

// JIALC: jump indexed and link compact (opcode 0x3E).
void Mips64Assembler::Jialc(GpuRegister rt, uint16_t imm16) {
  EmitI(0x3E, static_cast<GpuRegister>(0), rt, imm16);
}
553 
// ---------------------------------------------------------------------------
// Compact conditional branches. Several mnemonics share an opcode and are
// distinguished by the register-field pattern, which is why the CHECKs below
// forbid the register combinations reserved for the sibling encodings.
// ---------------------------------------------------------------------------

// BLTC: branch if rs < rt, signed (opcode 0x17 with two distinct non-zero regs).
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x17, rs, rt, imm16);
}

// BLTZC: branch if rt < 0 (opcode 0x17 with rs == rt != ZERO).
void Mips64Assembler::Bltzc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x17, rt, rt, imm16);
}

// BGTZC: branch if rt > 0 (opcode 0x17 with rs == ZERO).
void Mips64Assembler::Bgtzc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x17, static_cast<GpuRegister>(0), rt, imm16);
}

// BGEC: branch if rs >= rt, signed (opcode 0x16 with two distinct non-zero regs).
void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x16, rs, rt, imm16);
}

// BGEZC: branch if rt >= 0 (opcode 0x16 with rs == rt != ZERO).
void Mips64Assembler::Bgezc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x16, rt, rt, imm16);
}

// BLEZC: branch if rt <= 0 (opcode 0x16 with rs == ZERO).
void Mips64Assembler::Blezc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x16, static_cast<GpuRegister>(0), rt, imm16);
}

// BLTUC: branch if rs < rt, unsigned (opcode 0x7).
void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x7, rs, rt, imm16);
}

// BGEUC: branch if rs >= rt, unsigned (opcode 0x6).
void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x6, rs, rt, imm16);
}

// BEQC: branch if rs == rt (opcode 0x8). The register numbers are ordered
// (smaller in the rs field) to produce the canonical encoding.
void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
}

// BNEC: branch if rs != rt (opcode 0x18), same canonical register ordering.
void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
}

// BEQZC: branch if rs == 0, 21-bit offset (opcode 0x36).
void Mips64Assembler::Beqzc(GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, ZERO);
  EmitI21(0x36, rs, imm21);
}

// BNEZC: branch if rs != 0, 21-bit offset (opcode 0x3E).
void Mips64Assembler::Bnezc(GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, ZERO);
  EmitI21(0x3E, rs, imm21);
}

// BC1EQZ: branch if FPR ft's bit 0 is zero (COP1 opcode 0x11, fmt 0x9).
void Mips64Assembler::Bc1eqz(FpuRegister ft, uint16_t imm16) {
  EmitFI(0x11, 0x9, ft, imm16);
}

// BC1NEZ: branch if FPR ft's bit 0 is non-zero (COP1 opcode 0x11, fmt 0xD).
void Mips64Assembler::Bc1nez(FpuRegister ft, uint16_t imm16) {
  EmitFI(0x11, 0xD, ft, imm16);
}
633 
// Dispatches a generic branch condition to the matching compact-branch
// emitter. Single-register conditions require rt == ZERO; the FPU conditions
// (kCondF/kCondT) carry the FPU register in the rs slot. kUncond is not a
// conditional branch and must not reach this function.
void Mips64Assembler::EmitBcondc(BranchCondition cond,
                                 GpuRegister rs,
                                 GpuRegister rt,
                                 uint32_t imm16_21) {
  switch (cond) {
    case kCondLT:
      Bltc(rs, rt, imm16_21);
      break;
    case kCondGE:
      Bgec(rs, rt, imm16_21);
      break;
    case kCondLE:
      // rs <= rt  is emitted as  rt >= rs.
      Bgec(rt, rs, imm16_21);
      break;
    case kCondGT:
      // rs > rt  is emitted as  rt < rs.
      Bltc(rt, rs, imm16_21);
      break;
    case kCondLTZ:
      CHECK_EQ(rt, ZERO);
      Bltzc(rs, imm16_21);
      break;
    case kCondGEZ:
      CHECK_EQ(rt, ZERO);
      Bgezc(rs, imm16_21);
      break;
    case kCondLEZ:
      CHECK_EQ(rt, ZERO);
      Blezc(rs, imm16_21);
      break;
    case kCondGTZ:
      CHECK_EQ(rt, ZERO);
      Bgtzc(rs, imm16_21);
      break;
    case kCondEQ:
      Beqc(rs, rt, imm16_21);
      break;
    case kCondNE:
      Bnec(rs, rt, imm16_21);
      break;
    case kCondEQZ:
      CHECK_EQ(rt, ZERO);
      Beqzc(rs, imm16_21);
      break;
    case kCondNEZ:
      CHECK_EQ(rt, ZERO);
      Bnezc(rs, imm16_21);
      break;
    case kCondLTU:
      Bltuc(rs, rt, imm16_21);
      break;
    case kCondGEU:
      Bgeuc(rs, rt, imm16_21);
      break;
    case kCondF:
      CHECK_EQ(rt, ZERO);
      Bc1eqz(static_cast<FpuRegister>(rs), imm16_21);
      break;
    case kCondT:
      CHECK_EQ(rt, ZERO);
      Bc1nez(static_cast<FpuRegister>(rs), imm16_21);
      break;
    case kUncond:
      LOG(FATAL) << "Unexpected branch condition " << cond;
      UNREACHABLE();
  }
}
700 
// ---------------------------------------------------------------------------
// FPU arithmetic (COP1 opcode 0x11). The fmt field selects precision:
// 0x10 for single (*S), 0x11 for double (*D). The funct field selects the
// operation (0x0 add .. 0x7 neg).
// ---------------------------------------------------------------------------

// ADD.S
void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
}

// SUB.S
void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
}

// MUL.S
void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
}

// DIV.S
void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}

// ADD.D
void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
}

// SUB.D
void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
}

// MUL.D
void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
}

// DIV.D
void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}

// SQRT.S (single-operand ops leave the ft field zero).
void Mips64Assembler::SqrtS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x4);
}

// SQRT.D
void Mips64Assembler::SqrtD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x4);
}

// ABS.S
void Mips64Assembler::AbsS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x5);
}

// ABS.D
void Mips64Assembler::AbsD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x5);
}

// MOV.S
void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

// MOV.D
void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

// NEG.S
void Mips64Assembler::NegS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x7);
}

// NEG.D
void Mips64Assembler::NegD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x7);
}
764 
// FP-to-integer conversions with explicit rounding mode. Function field:
// round/trunc/ceil/floor to 64-bit (L) = 0x8..0xb, to 32-bit word (W) =
// 0xc..0xf. fmt = 0x10 (S) or 0x11 (D) gives the source format; the result
// (an integer bit pattern) lands in FPU register fd.

void Mips64Assembler::RoundLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x8);  // round.l.s
}

void Mips64Assembler::RoundLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x8);  // round.l.d
}

void Mips64Assembler::RoundWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xc);  // round.w.s
}

void Mips64Assembler::RoundWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xc);  // round.w.d
}

void Mips64Assembler::TruncLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x9);  // trunc.l.s
}

void Mips64Assembler::TruncLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x9);  // trunc.l.d
}

void Mips64Assembler::TruncWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xd);  // trunc.w.s
}

void Mips64Assembler::TruncWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xd);  // trunc.w.d
}

void Mips64Assembler::CeilLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xa);  // ceil.l.s
}

void Mips64Assembler::CeilLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xa);  // ceil.l.d
}

void Mips64Assembler::CeilWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xe);  // ceil.w.s
}

void Mips64Assembler::CeilWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xe);  // ceil.w.d
}

void Mips64Assembler::FloorLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xb);  // floor.l.s
}

void Mips64Assembler::FloorLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xb);  // floor.l.d
}

void Mips64Assembler::FloorWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xf);  // floor.w.s
}

void Mips64Assembler::FloorWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xf);  // floor.w.d
}
828 
// MIPS R6 FPU select / rounding / classification / min-max instructions.
// Function field: sel = 0x10, rint = 0x1a, class = 0x1b, min = 0x1c,
// max = 0x1e; fmt = 0x10 (S) or 0x11 (D).

void Mips64Assembler::SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x10);  // sel.s fd, fs, ft
}

void Mips64Assembler::SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x10);  // sel.d fd, fs, ft
}

void Mips64Assembler::RintS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1a);  // rint.s
}

void Mips64Assembler::RintD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1a);  // rint.d
}

void Mips64Assembler::ClassS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1b);  // class.s
}

void Mips64Assembler::ClassD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1b);  // class.d
}

void Mips64Assembler::MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1c);  // min.s fd, fs, ft
}

void Mips64Assembler::MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1c);  // min.d fd, fs, ft
}

void Mips64Assembler::MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1e);  // max.s fd, fs, ft
}

void Mips64Assembler::MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);  // max.d fd, fs, ft
}
868 
// MIPS R6 CMP.cond.fmt comparisons. The result is an all-ones/all-zeros mask
// written to FPU register fd. fmt = 0x14 for single, 0x15 for double.
// Condition encodings in the function field: un = 0x01, eq = 0x02,
// ueq = 0x03, lt = 0x04, ult = 0x05, le = 0x06, ule = 0x07, or = 0x11,
// une = 0x12, ne = 0x13 ("u" variants are true on unordered operands).

void Mips64Assembler::CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x01);  // cmp.un.s
}

void Mips64Assembler::CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x02);  // cmp.eq.s
}

void Mips64Assembler::CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x03);  // cmp.ueq.s
}

void Mips64Assembler::CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x04);  // cmp.lt.s
}

void Mips64Assembler::CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x05);  // cmp.ult.s
}

void Mips64Assembler::CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x06);  // cmp.le.s
}

void Mips64Assembler::CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x07);  // cmp.ule.s
}

void Mips64Assembler::CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x11);  // cmp.or.s
}

void Mips64Assembler::CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x12);  // cmp.une.s
}

void Mips64Assembler::CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x13);  // cmp.ne.s
}

void Mips64Assembler::CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x01);  // cmp.un.d
}

void Mips64Assembler::CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x02);  // cmp.eq.d
}

void Mips64Assembler::CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x03);  // cmp.ueq.d
}

void Mips64Assembler::CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x04);  // cmp.lt.d
}

void Mips64Assembler::CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x05);  // cmp.ult.d
}

void Mips64Assembler::CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x06);  // cmp.le.d
}

void Mips64Assembler::CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x07);  // cmp.ule.d
}

void Mips64Assembler::CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x11);  // cmp.or.d
}

void Mips64Assembler::CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x12);  // cmp.une.d
}

void Mips64Assembler::CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x13);  // cmp.ne.d
}
948 
// Format conversions (CVT.fmt.fmt). The fmt field names the SOURCE format
// (0x10 = S, 0x11 = D, 0x14 = W word, 0x15 = L long), and the function field
// names the destination (cvt-to-S = 0x20, cvt-to-D = 0x21).

void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);  // cvt.s.w
}

void Mips64Assembler::Cvtdw(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x21);  // cvt.d.w
}

void Mips64Assembler::Cvtsd(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x20);  // cvt.s.d
}

void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);  // cvt.d.s
}

void Mips64Assembler::Cvtsl(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x20);  // cvt.s.l
}

void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21);  // cvt.d.l
}
972 
// GPR <-> FPR move instructions. These reuse the COP1 register format with
// the GPR encoded in the ft slot; the sub-opcode field selects the move:
// mfc1 = 0x00, dmfc1 = 0x01, mfhc1 = 0x03, mtc1 = 0x04, dmtc1 = 0x05,
// mthc1 = 0x07 ("d" = 64-bit, "h" = high word).

void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mfhc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x03, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mtc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x04, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mthc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x07, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Dmfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x01, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Dmtc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x05, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}
996 
// FPU loads/stores, I-type with a signed 16-bit displacement off GPR rs.
// Opcodes: lwc1 = 0x31, ldc1 = 0x35, swc1 = 0x39, sdc1 = 0x3d.
// Note: imm16 is passed as uint16_t; the hardware sign-extends it.

void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);  // lwc1 ft, imm16(rs)
}

void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);  // ldc1 ft, imm16(rs)
}

void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);  // swc1 ft, imm16(rs)
}

void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);  // sdc1 ft, imm16(rs)
}
1012 
// Emit a BREAK instruction (SPECIAL opcode, function 0xD) — traps to the
// debugger/kernel.
void Mips64Assembler::Break() {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0xD);
}

// Emit a NOP — the all-zero encoding (sll $zero, $zero, 0).
void Mips64Assembler::Nop() {
  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0x0);
}
1022 
// Pseudo-instructions built on top of real ALU ops.

// rd = rs (implemented as rd = rs | $zero).
void Mips64Assembler::Move(GpuRegister rd, GpuRegister rs) {
  Or(rd, rs, ZERO);
}

// rd = 0.
void Mips64Assembler::Clear(GpuRegister rd) {
  Move(rd, ZERO);
}

// rd = ~rs (implemented as rd = ~(rs | $zero)).
void Mips64Assembler::Not(GpuRegister rd, GpuRegister rs) {
  Nor(rd, rs, ZERO);
}
1034 
LoadConst32(GpuRegister rd,int32_t value)1035 void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
1036   if (IsUint<16>(value)) {
1037     // Use OR with (unsigned) immediate to encode 16b unsigned int.
1038     Ori(rd, ZERO, value);
1039   } else if (IsInt<16>(value)) {
1040     // Use ADD with (signed) immediate to encode 16b signed int.
1041     Addiu(rd, ZERO, value);
1042   } else {
1043     Lui(rd, value >> 16);
1044     if (value & 0xFFFF)
1045       Ori(rd, rd, value);
1046   }
1047 }
1048 
// Materialize an arbitrary 64-bit constant into rd using the shortest
// instruction sequence (1-4 instructions), exploiting R6's dahi/dati
// (add immediate to bits 32-47 / 48-63) and shifted loads.
// The cases below are ordered from cheapest to most general; the first
// matching case wins, so the ordering is load-sequence-length critical.
void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
  // dahi/dati add to the upper halves, so a set bit 31 (sign-extended by the
  // 32-bit load below) must be compensated by +1 in the immediate.
  int bit31 = (value & UINT64_C(0x80000000)) != 0;

  // Loads with 1 instruction.
  if (IsUint<16>(value)) {
    Ori(rd, ZERO, value);
  } else if (IsInt<16>(value)) {
    Daddiu(rd, ZERO, value);
  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
    Lui(rd, value >> 16);
  } else if (IsInt<32>(value)) {
    // Loads with 2 instructions.
    Lui(rd, value >> 16);
    Ori(rd, rd, value);
  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
    // Low 16 bits + bits 32-47: ori then dahi.
    Ori(rd, ZERO, value);
    Dahi(rd, value >> 32);
  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
    // Low 16 bits + top 16 bits: ori then dati.
    Ori(rd, ZERO, value);
    Dati(rd, value >> 48);
  } else if ((value & 0xFFFF) == 0 &&
             (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
    // lui covers bits 16-31 (sign-extending); dahi fixes bits 32-47.
    Lui(rd, value >> 16);
    Dahi(rd, (value >> 32) + bit31);
  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
    // lui covers bits 16-31; bits 32-47 already match the sign extension,
    // so only dati is needed for the top 16 bits.
    Lui(rd, value >> 16);
    Dati(rd, (value >> 48) + bit31);
  } else if (IsPowerOfTwo(value + UINT64_C(1))) {
    // Value is a run of low 1-bits (2^k - 1): load -1 and shift right.
    int shift_cnt = 64 - CTZ(value + UINT64_C(1));
    Daddiu(rd, ZERO, -1);
    if (shift_cnt < 32) {
      Dsrl(rd, rd, shift_cnt);
    } else {
      Dsrl32(rd, rd, shift_cnt & 31);
    }
  } else {
    // Try loading a shifted-down version of the value and shifting it back up.
    int shift_cnt = CTZ(value);
    int64_t tmp = value >> shift_cnt;
    if (IsUint<16>(tmp)) {
      Ori(rd, ZERO, tmp);
      if (shift_cnt < 32) {
        Dsll(rd, rd, shift_cnt);
      } else {
        Dsll32(rd, rd, shift_cnt & 31);
      }
    } else if (IsInt<16>(tmp)) {
      Daddiu(rd, ZERO, tmp);
      if (shift_cnt < 32) {
        Dsll(rd, rd, shift_cnt);
      } else {
        Dsll32(rd, rd, shift_cnt & 31);
      }
    } else if (IsInt<32>(tmp)) {
      // Loads with 3 instructions.
      Lui(rd, tmp >> 16);
      Ori(rd, rd, tmp);
      if (shift_cnt < 32) {
        Dsll(rd, rd, shift_cnt);
      } else {
        Dsll32(rd, rd, shift_cnt & 31);
      }
    } else {
      // Shift past the low 16 bits and OR them back in afterwards.
      shift_cnt = 16 + CTZ(value >> 16);
      tmp = value >> shift_cnt;
      if (IsUint<16>(tmp)) {
        Ori(rd, ZERO, tmp);
        if (shift_cnt < 32) {
          Dsll(rd, rd, shift_cnt);
        } else {
          Dsll32(rd, rd, shift_cnt & 31);
        }
        Ori(rd, rd, value);
      } else if (IsInt<16>(tmp)) {
        Daddiu(rd, ZERO, tmp);
        if (shift_cnt < 32) {
          Dsll(rd, rd, shift_cnt);
        } else {
          Dsll32(rd, rd, shift_cnt & 31);
        }
        Ori(rd, rd, value);
      } else {
        // Loads with 3-4 instructions: lui/ori for the low 32 bits,
        // then dahi/dati for the upper halves. Each add-immediate step
        // pre-compensates for the sign extension of the step below it.
        uint64_t tmp2 = value;
        bool used_lui = false;
        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
          Lui(rd, tmp2 >> 16);
          used_lui = true;
        }
        if ((tmp2 & 0xFFFF) != 0) {
          if (used_lui) {
            Ori(rd, rd, tmp2);
          } else {
            Ori(rd, ZERO, tmp2);
          }
        }
        if (bit31) {
          // Compensate for the sign extension of the 32-bit load.
          tmp2 += UINT64_C(0x100000000);
        }
        if (((tmp2 >> 32) & 0xFFFF) != 0) {
          Dahi(rd, tmp2 >> 32);
        }
        if (tmp2 & UINT64_C(0x800000000000)) {
          // Compensate for dahi's sign extension into the top 16 bits.
          tmp2 += UINT64_C(0x1000000000000);
        }
        if ((tmp2 >> 48) != 0) {
          Dati(rd, tmp2 >> 48);
        }
      }
    }
  }
}
1160 
// rt = rs + value, for an arbitrary 64-bit immediate. Uses a single daddiu
// when the value fits in 16 signed bits; otherwise materializes the constant
// in rtmp (clobbering it) and adds. rtmp must differ from rs.
void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
  if (IsInt<16>(value)) {
    Daddiu(rt, rs, value);
  } else {
    LoadConst64(rtmp, value);
    Daddu(rt, rs, rtmp);
  }
}
1169 
// Pick the short branch type if its offset field can hold the needed
// displacement, otherwise fall back to the long (multi-instruction) form.
void Mips64Assembler::Branch::InitShortOrLong(Mips64Assembler::Branch::OffsetBits offset_size,
                                              Mips64Assembler::Branch::Type short_type,
                                              Mips64Assembler::Branch::Type long_type) {
  type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
}
1175 
// Choose the initial branch type (short vs long; call/uncond/cond) based on
// the offset size needed for the current location/target, and remember it as
// old_type_ for later size-delta computations during promotion.
void Mips64Assembler::Branch::InitializeType(bool is_call) {
  OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
  if (is_call) {
    InitShortOrLong(offset_size, kCall, kLongCall);
  } else if (condition_ == kUncond) {
    InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
  } else {
    if (condition_ == kCondEQZ || condition_ == kCondNEZ) {
      // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
      type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
    } else {
      InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
    }
  }
  old_type_ = type_;
}
1192 
IsNop(BranchCondition condition,GpuRegister lhs,GpuRegister rhs)1193 bool Mips64Assembler::Branch::IsNop(BranchCondition condition, GpuRegister lhs, GpuRegister rhs) {
1194   switch (condition) {
1195     case kCondLT:
1196     case kCondGT:
1197     case kCondNE:
1198     case kCondLTU:
1199       return lhs == rhs;
1200     default:
1201       return false;
1202   }
1203 }
1204 
IsUncond(BranchCondition condition,GpuRegister lhs,GpuRegister rhs)1205 bool Mips64Assembler::Branch::IsUncond(BranchCondition condition,
1206                                        GpuRegister lhs,
1207                                        GpuRegister rhs) {
1208   switch (condition) {
1209     case kUncond:
1210       return true;
1211     case kCondGE:
1212     case kCondLE:
1213     case kCondEQ:
1214     case kCondGEU:
1215       return lhs == rhs;
1216     default:
1217       return false;
1218   }
1219 }
1220 
// Unconditional branch from `location` to `target` (which may still be
// kUnresolved at this point).
Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target)
    : old_location_(location),
      location_(location),
      target_(target),
      lhs_reg_(ZERO),
      rhs_reg_(ZERO),
      condition_(kUncond) {
  InitializeType(false);
}
1230 
// Conditional branch. Validates the register operands against the condition
// kind (two-register conditions need two non-zero registers; compare-with-
// zero and FP-flag conditions take fewer), rejects never-taken combinations
// (the caller must not emit nops as branches) and degenerates always-taken
// combinations into an unconditional branch.
Mips64Assembler::Branch::Branch(uint32_t location,
                                uint32_t target,
                                Mips64Assembler::BranchCondition condition,
                                GpuRegister lhs_reg,
                                GpuRegister rhs_reg)
    : old_location_(location),
      location_(location),
      target_(target),
      lhs_reg_(lhs_reg),
      rhs_reg_(rhs_reg),
      condition_(condition) {
  CHECK_NE(condition, kUncond);
  switch (condition) {
    case kCondEQ:
    case kCondNE:
    case kCondLT:
    case kCondGE:
    case kCondLE:
    case kCondGT:
    case kCondLTU:
    case kCondGEU:
      // Two-register conditions: both operands must be real registers.
      CHECK_NE(lhs_reg, ZERO);
      CHECK_NE(rhs_reg, ZERO);
      break;
    case kCondLTZ:
    case kCondGEZ:
    case kCondLEZ:
    case kCondGTZ:
    case kCondEQZ:
    case kCondNEZ:
      // Compare-with-zero conditions: single register operand only.
      CHECK_NE(lhs_reg, ZERO);
      CHECK_EQ(rhs_reg, ZERO);
      break;
    case kCondF:
    case kCondT:
      // FP condition flag: no GPR right-hand side.
      CHECK_EQ(rhs_reg, ZERO);
      break;
    case kUncond:
      UNREACHABLE();
  }
  CHECK(!IsNop(condition, lhs_reg, rhs_reg));
  if (IsUncond(condition, lhs_reg, rhs_reg)) {
    // Branch condition is always true, make the branch unconditional.
    condition_ = kUncond;
  }
  InitializeType(false);
}
1278 
// Call branch. `indirect_reg` receives the target address for the long form;
// it must be a real register other than AT (AT is reserved as scratch).
Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, GpuRegister indirect_reg)
    : old_location_(location),
      location_(location),
      target_(target),
      lhs_reg_(indirect_reg),
      rhs_reg_(ZERO),
      condition_(kUncond) {
  CHECK_NE(indirect_reg, ZERO);
  CHECK_NE(indirect_reg, AT);
  InitializeType(true);
}
1290 
// Returns the logical negation of a branch condition (used when emitting a
// long branch as "branch-around on the opposite condition"). kUncond has no
// opposite and is a fatal error. The switch is exhaustive so the compiler
// flags any newly added condition.
Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
    Mips64Assembler::BranchCondition cond) {
  switch (cond) {
    case kCondLT:
      return kCondGE;
    case kCondGE:
      return kCondLT;
    case kCondLE:
      return kCondGT;
    case kCondGT:
      return kCondLE;
    case kCondLTZ:
      return kCondGEZ;
    case kCondGEZ:
      return kCondLTZ;
    case kCondLEZ:
      return kCondGTZ;
    case kCondGTZ:
      return kCondLEZ;
    case kCondEQ:
      return kCondNE;
    case kCondNE:
      return kCondEQ;
    case kCondEQZ:
      return kCondNEZ;
    case kCondNEZ:
      return kCondEQZ;
    case kCondLTU:
      return kCondGEU;
    case kCondGEU:
      return kCondLTU;
    case kCondF:
      return kCondT;
    case kCondT:
      return kCondF;
    case kUncond:
      LOG(FATAL) << "Unexpected branch condition " << cond;
  }
  UNREACHABLE();
}
1331 
// Simple accessors. "Old" variants report the branch's state before any
// promotion/relocation; sizes are in bytes, lengths in 4-byte instructions.

Mips64Assembler::Branch::Type Mips64Assembler::Branch::GetType() const {
  return type_;
}

Mips64Assembler::BranchCondition Mips64Assembler::Branch::GetCondition() const {
  return condition_;
}

GpuRegister Mips64Assembler::Branch::GetLeftRegister() const {
  return lhs_reg_;
}

GpuRegister Mips64Assembler::Branch::GetRightRegister() const {
  return rhs_reg_;
}

uint32_t Mips64Assembler::Branch::GetTarget() const {
  return target_;
}

uint32_t Mips64Assembler::Branch::GetLocation() const {
  return location_;
}

uint32_t Mips64Assembler::Branch::GetOldLocation() const {
  return old_location_;
}

// Length in instructions of the branch's current encoding.
uint32_t Mips64Assembler::Branch::GetLength() const {
  return branch_info_[type_].length;
}

// Length in instructions of the branch's original (pre-promotion) encoding.
uint32_t Mips64Assembler::Branch::GetOldLength() const {
  return branch_info_[old_type_].length;
}

uint32_t Mips64Assembler::Branch::GetSize() const {
  return GetLength() * sizeof(uint32_t);
}

uint32_t Mips64Assembler::Branch::GetOldSize() const {
  return GetOldLength() * sizeof(uint32_t);
}

// First byte past the branch's encoding at its current location.
uint32_t Mips64Assembler::Branch::GetEndLocation() const {
  return GetLocation() + GetSize();
}

// First byte past the branch's original encoding at its original location.
uint32_t Mips64Assembler::Branch::GetOldEndLocation() const {
  return GetOldLocation() + GetOldSize();
}
1383 
// True for the long (multi-instruction) branch encodings. The switch is
// exhaustive over Type so the compiler flags any newly added variant.
bool Mips64Assembler::Branch::IsLong() const {
  switch (type_) {
    // Short branches.
    case kUncondBranch:
    case kCondBranch:
    case kCall:
      return false;
    // Long branches.
    case kLongUncondBranch:
    case kLongCondBranch:
    case kLongCall:
      return true;
  }
  UNREACHABLE();
}
1399 
// A branch is resolved once its target label has been bound.
bool Mips64Assembler::Branch::IsResolved() const {
  return target_ != kUnresolved;
}

// Offset field width of the branch's current encoding. beqzc/bnezc carry a
// wider (23-bit) offset than the other compact conditional branches, so they
// are special-cased rather than read from branch_info_.
Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSize() const {
  OffsetBits offset_size =
      (type_ == kCondBranch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
          ? kOffset23
          : branch_info_[type_].offset_size;
  return offset_size;
}
1411 
// Smallest offset field width that can span location -> target, padded by
// kMaxBranchSize so the answer stays valid while surrounding branches grow.
Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSizeNeeded(uint32_t location,
                                                                                 uint32_t target) {
  // For unresolved targets assume the shortest encoding
  // (later it will be made longer if needed).
  if (target == kUnresolved)
    return kOffset16;
  int64_t distance = static_cast<int64_t>(target) - location;
  // To simplify calculations in composite branches consisting of multiple instructions
  // bump up the distance by a value larger than the max byte size of a composite branch.
  distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
  if (IsInt<kOffset16>(distance))
    return kOffset16;
  else if (IsInt<kOffset18>(distance))
    return kOffset18;
  else if (IsInt<kOffset21>(distance))
    return kOffset21;
  else if (IsInt<kOffset23>(distance))
    return kOffset23;
  else if (IsInt<kOffset28>(distance))
    return kOffset28;
  return kOffset32;
}
1434 
// Record the (now known) target address of this branch.
void Mips64Assembler::Branch::Resolve(uint32_t target) {
  target_ = target;
}

// Shift this branch's location/target by `delta` bytes if they lie after
// `expand_location`, to account for another branch growing there.
void Mips64Assembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
  if (location_ > expand_location) {
    location_ += delta;
  }
  if (!IsResolved()) {
    return;  // Don't know the target yet.
  }
  if (target_ > expand_location) {
    target_ += delta;
  }
}
1450 
// Switch this branch's encoding from its short form to the corresponding
// long (multi-instruction) form. Idempotent: already-long types are kept.
void Mips64Assembler::Branch::PromoteToLong() {
  switch (type_) {
    // Short branches.
    case kUncondBranch:
      type_ = kLongUncondBranch;
      break;
    case kCondBranch:
      type_ = kLongCondBranch;
      break;
    case kCall:
      type_ = kLongCall;
      break;
    default:
      // Note: 'type_' is already long.
      break;
  }
  CHECK(IsLong());
}
1469 
PromoteIfNeeded(uint32_t max_short_distance)1470 uint32_t Mips64Assembler::Branch::PromoteIfNeeded(uint32_t max_short_distance) {
1471   // If the branch is still unresolved or already long, nothing to do.
1472   if (IsLong() || !IsResolved()) {
1473     return 0;
1474   }
1475   // Promote the short branch to long if the offset size is too small
1476   // to hold the distance between location_ and target_.
1477   if (GetOffsetSizeNeeded(location_, target_) > GetOffsetSize()) {
1478     PromoteToLong();
1479     uint32_t old_size = GetOldSize();
1480     uint32_t new_size = GetSize();
1481     CHECK_GT(new_size, old_size);
1482     return new_size - old_size;
1483   }
1484   // The following logic is for debugging/testing purposes.
1485   // Promote some short branches to long when it's not really required.
1486   if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max())) {
1487     int64_t distance = static_cast<int64_t>(target_) - location_;
1488     distance = (distance >= 0) ? distance : -distance;
1489     if (distance >= max_short_distance) {
1490       PromoteToLong();
1491       uint32_t old_size = GetOldSize();
1492       uint32_t new_size = GetSize();
1493       CHECK_GT(new_size, old_size);
1494       return new_size - old_size;
1495     }
1496   }
1497   return 0;
1498 }
1499 
// Location of the instruction within this branch's sequence that actually
// carries the encoded offset (long forms place it after a leading auipc-style
// instruction, per branch_info_).
uint32_t Mips64Assembler::Branch::GetOffsetLocation() const {
  return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
}

// Offset value ready for encoding: byte distance from the PC-relative origin
// to the target, masked to the offset field width and shifted per the
// instruction's encoding.
uint32_t Mips64Assembler::Branch::GetOffset() const {
  CHECK(IsResolved());
  uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
  // Calculate the byte distance between instructions and also account for
  // different PC-relative origins.
  uint32_t offset = target_ - GetOffsetLocation() - branch_info_[type_].pc_org * sizeof(uint32_t);
  // Prepare the offset for encoding into the instruction(s).
  offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
  return offset;
}
1514 
// Bounds-checked access to the branch table, mutable and const flavors.

Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) {
  CHECK_LT(branch_id, branches_.size());
  return &branches_[branch_id];
}

const Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) const {
  CHECK_LT(branch_id, branches_.size());
  return &branches_[branch_id];
}
1524 
// Bind `label` to the current buffer position: resolve every forward branch
// that was linked to it (the links form a list threaded through the buffer
// words reserved for the branches), then store the label's own position
// relative to the end of the preceding branch so later offset adjustments
// stay cheap.
void Mips64Assembler::Bind(Mips64Label* label) {
  CHECK(!label->IsBound());
  uint32_t bound_pc = buffer_.Size();

  // Walk the list of branches referring to and preceding this label.
  // Store the previously unknown target addresses in them.
  while (label->IsLinked()) {
    uint32_t branch_id = label->Position();
    Branch* branch = GetBranch(branch_id);
    branch->Resolve(bound_pc);

    uint32_t branch_location = branch->GetLocation();
    // Extract the location of the previous branch in the list (walking the list backwards;
    // the previous branch ID was stored in the space reserved for this branch).
    uint32_t prev = buffer_.Load<uint32_t>(branch_location);

    // On to the previous branch in the list...
    label->position_ = prev;
  }

  // Now make the label object contain its own location (relative to the end of the preceding
  // branch, if any; it will be used by the branches referring to and following this label).
  label->prev_branch_id_plus_one_ = branches_.size();
  if (label->prev_branch_id_plus_one_) {
    uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
    const Branch* branch = GetBranch(branch_id);
    bound_pc -= branch->GetEndLocation();
  }
  label->BindTo(bound_pc);
}
1555 
// Absolute buffer location of a bound label: its stored position is relative
// to the end of the branch preceding it (if any), so add that back.
uint32_t Mips64Assembler::GetLabelLocation(Mips64Label* label) const {
  CHECK(label->IsBound());
  uint32_t target = label->Position();
  if (label->prev_branch_id_plus_one_) {
    // Get label location based on the branch preceding it.
    uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
    const Branch* branch = GetBranch(branch_id);
    target += branch->GetEndLocation();
  }
  return target;
}
1567 
// Maps a position in the pre-promotion code to its position after branch expansion,
// by accumulating the size deltas of all branches located before it.
uint32_t Mips64Assembler::GetAdjustedPosition(uint32_t old_position) {
  // We can reconstruct the adjustment by going through all the branches from the beginning
  // up to the old_position. Since we expect AdjustedPosition() to be called in a loop
  // with increasing old_position, we can use the data from last AdjustedPosition() to
  // continue where we left off and the whole loop should be O(m+n) where m is the number
  // of positions to adjust and n is the number of branches.
  if (old_position < last_old_position_) {
    // Query went backwards; discard the cache and rescan from the first branch.
    last_position_adjustment_ = 0;
    last_old_position_ = 0;
    last_branch_id_ = 0;
  }
  while (last_branch_id_ != branches_.size()) {
    const Branch* branch = GetBranch(last_branch_id_);
    if (branch->GetLocation() >= old_position + last_position_adjustment_) {
      break;
    }
    // Each expanded branch shifts everything after it by (new size - old size).
    last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
    ++last_branch_id_;
  }
  last_old_position_ = old_position;
  return old_position + last_position_adjustment_;
}
1590 
// Finishes emission of the branch just appended to branches_: links it into the
// label's pending list when the target is unknown, and reserves placeholder
// space (nops) in the buffer for the branch instructions.
void Mips64Assembler::FinalizeLabeledBranch(Mips64Label* label) {
  uint32_t length = branches_.back().GetLength();
  if (!label->IsBound()) {
    // Branch forward (to a following label), distance is unknown.
    // The first branch forward will contain 0, serving as the terminator of
    // the list of forward-reaching branches.
    Emit(label->position_);
    length--;
    // Now make the label object point to this branch
    // (this forms a linked list of branches preceding this label).
    uint32_t branch_id = branches_.size() - 1;
    label->LinkTo(branch_id);
  }
  // Reserve space for the branch.
  while (length--) {
    Nop();
  }
}
1609 
// Records an unconditional branch to |label| (target may still be unresolved).
void Mips64Assembler::Buncond(Mips64Label* label) {
  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
  branches_.emplace_back(buffer_.Size(), target);
  FinalizeLabeledBranch(label);
}
1615 
// Records a conditional branch to |label|; branches whose condition can never
// be taken are elided entirely.
void Mips64Assembler::Bcond(Mips64Label* label,
                            BranchCondition condition,
                            GpuRegister lhs,
                            GpuRegister rhs) {
  // If lhs = rhs, this can be a NOP.
  if (Branch::IsNop(condition, lhs, rhs)) {
    return;
  }
  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
  branches_.emplace_back(buffer_.Size(), target, condition, lhs, rhs);
  FinalizeLabeledBranch(label);
}
1628 
// Records a call to |label|; |indirect_reg| will hold the computed target address.
void Mips64Assembler::Call(Mips64Label* label, GpuRegister indirect_reg) {
  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
  branches_.emplace_back(buffer_.Size(), target, indirect_reg);
  FinalizeLabeledBranch(label);
}
1634 
// Iterates to a fixed point promoting out-of-range short branches to long form,
// then resizes the buffer and shifts the code between branches to its final layout.
void Mips64Assembler::PromoteBranches() {
  // Promote short branches to long as necessary.
  bool changed;
  do {
    changed = false;
    for (auto& branch : branches_) {
      CHECK(branch.IsResolved());
      uint32_t delta = branch.PromoteIfNeeded();
      // If this branch has been promoted and needs to expand in size,
      // relocate all branches by the expansion size.
      if (delta) {
        changed = true;
        uint32_t expand_location = branch.GetLocation();
        for (auto& branch2 : branches_) {
          branch2.Relocate(expand_location, delta);
        }
      }
    }
  } while (changed);

  // Account for branch expansion by resizing the code buffer
  // and moving the code in it to its final location.
  size_t branch_count = branches_.size();
  if (branch_count > 0) {
    // Resize.
    Branch& last_branch = branches_[branch_count - 1];
    uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
    uint32_t old_size = buffer_.Size();
    buffer_.Resize(old_size + size_delta);
    // Move the code residing between branch placeholders.
    // Walk backwards so each chunk is moved before its source range is overwritten.
    uint32_t end = old_size;
    for (size_t i = branch_count; i > 0; ) {
      Branch& branch = branches_[--i];
      uint32_t size = end - branch.GetOldEndLocation();
      buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
      end = branch.GetOldLocation();
    }
  }
}
1674 
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
// One row per branch type; field order follows Branch::BranchInfo as declared in
// the header (instruction count, offset operand position, etc. — confirm there).
const Mips64Assembler::Branch::BranchInfo Mips64Assembler::Branch::branch_info_[] = {
  // Short branches.
  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kUncondBranch
  {  2, 0, 1, Mips64Assembler::Branch::kOffset18, 2 },  // kCondBranch
                                                        // Exception: kOffset23 for beqzc/bnezc
  {  2, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kCall
  // Long branches.
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongUncondBranch
  {  3, 1, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongCondBranch
  {  3, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongCall
};
1687 
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
// Overwrites the placeholder reserved for |branch| with its final instruction
// sequence, now that the offset is known.
void Mips64Assembler::EmitBranch(Mips64Assembler::Branch* branch) {
  CHECK(overwriting_);
  overwrite_location_ = branch->GetLocation();
  uint32_t offset = branch->GetOffset();
  BranchCondition condition = branch->GetCondition();
  GpuRegister lhs = branch->GetLeftRegister();
  GpuRegister rhs = branch->GetRightRegister();
  switch (branch->GetType()) {
    // Short branches.
    case Branch::kUncondBranch:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Bc(offset);
      break;
    case Branch::kCondBranch:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      EmitBcondc(condition, lhs, rhs, offset);
      Nop();  // TODO: improve by filling the forbidden/delay slot.
      break;
    case Branch::kCall:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Addiupc(lhs, offset);
      Jialc(lhs, 0);
      break;

    // Long branches.
    case Branch::kLongUncondBranch:
      // Adding 0x10000 when bit 15 is set makes High16Bits(offset) compensate
      // for jic sign-extending Low16Bits(offset).
      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Jic(AT, Low16Bits(offset));
      break;
    case Branch::kLongCondBranch:
      // Skip the two-instruction long jump when the opposite condition holds.
      EmitBcondc(Branch::OppositeCondition(condition), lhs, rhs, 2);
      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Jic(AT, Low16Bits(offset));
      break;
    case Branch::kLongCall:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in daddiu.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(lhs, High16Bits(offset));
      Daddiu(lhs, lhs, Low16Bits(offset));
      Jialc(lhs, 0);
      break;
  }
  CHECK_EQ(overwrite_location_, branch->GetEndLocation());
  CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
}
1738 
// Unconditional compact branch to |label|.
void Mips64Assembler::Bc(Mips64Label* label) {
  Buncond(label);
}
1742 
// Call to |label|; |indirect_reg| receives the target address.
void Mips64Assembler::Jialc(Mips64Label* label, GpuRegister indirect_reg) {
  Call(label, indirect_reg);
}
1746 
// Branch to |label| if rs < rt (signed).
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLT, rs, rt);
}
1750 
// Branch to |label| if rt < 0 (signed).
void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLTZ, rt);
}
1754 
// Branch to |label| if rt > 0 (signed).
void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGTZ, rt);
}
1758 
// Branch to |label| if rs >= rt (signed).
void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGE, rs, rt);
}
1762 
// Branch to |label| if rt >= 0 (signed).
void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGEZ, rt);
}
1766 
// Branch to |label| if rt <= 0 (signed).
void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLEZ, rt);
}
1770 
// Branch to |label| if rs < rt (unsigned).
void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLTU, rs, rt);
}
1774 
// Branch to |label| if rs >= rt (unsigned).
void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGEU, rs, rt);
}
1778 
// Branch to |label| if rs == rt.
void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondEQ, rs, rt);
}
1782 
// Branch to |label| if rs != rt.
void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondNE, rs, rt);
}
1786 
// Branch to |label| if rs == 0.
void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label) {
  Bcond(label, kCondEQZ, rs);
}
1790 
// Branch to |label| if rs != 0.
void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label) {
  Bcond(label, kCondNEZ, rs);
}
1794 
// Branch to |label| if the FP condition in |ft| is false (bc1eqz semantics).
void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label) {
  Bcond(label, kCondF, static_cast<GpuRegister>(ft), ZERO);
}
1798 
// Branch to |label| if the FP condition in |ft| is true (bc1nez semantics).
void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label) {
  Bcond(label, kCondT, static_cast<GpuRegister>(ft), ZERO);
}
1802 
// Loads a value of |type| from memory at |base| + |offset| into |reg|. Offsets
// that do not fit a 16-bit immediate are first folded into AT.
void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
                                     int32_t offset) {
  if (!IsInt<16>(offset) ||
      (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
       !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
    // base = base + (offset with low bits cleared); the remaining low-bit
    // offset (< 8) always fits in a 16-bit immediate.
    LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
    Daddu(AT, AT, base);
    base = AT;
    offset &= (kMips64DoublewordSize - 1);
  }

  switch (type) {
    case kLoadSignedByte:
      Lb(reg, base, offset);
      break;
    case kLoadUnsignedByte:
      Lbu(reg, base, offset);
      break;
    case kLoadSignedHalfword:
      Lh(reg, base, offset);
      break;
    case kLoadUnsignedHalfword:
      Lhu(reg, base, offset);
      break;
    case kLoadWord:
      CHECK_ALIGNED(offset, kMips64WordSize);
      Lw(reg, base, offset);
      break;
    case kLoadUnsignedWord:
      CHECK_ALIGNED(offset, kMips64WordSize);
      Lwu(reg, base, offset);
      break;
    case kLoadDoubleword:
      if (!IsAligned<kMips64DoublewordSize>(offset)) {
        CHECK_ALIGNED(offset, kMips64WordSize);
        // Split into two word loads, merged into |reg| with dinsu.
        // NOTE(review): if |reg| == |base| the first Lwu clobbers the base
        // before the second load — callers appear to avoid this; confirm.
        Lwu(reg, base, offset);
        Lwu(TMP2, base, offset + kMips64WordSize);
        Dinsu(reg, TMP2, 32, 32);
      } else {
        Ld(reg, base, offset);
      }
      break;
  }
}
1847 
// Loads a float/double from memory at |base| + |offset| into FPU register |reg|.
// Offsets that do not fit a 16-bit immediate are first folded into AT.
void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
                                        int32_t offset) {
  if (!IsInt<16>(offset) ||
      (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
       !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
    // Fold the high part of the offset into AT (see LoadFromOffset).
    LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
    Daddu(AT, AT, base);
    base = AT;
    offset &= (kMips64DoublewordSize - 1);
  }

  switch (type) {
    case kLoadWord:
      CHECK_ALIGNED(offset, kMips64WordSize);
      Lwc1(reg, base, offset);
      break;
    case kLoadDoubleword:
      if (!IsAligned<kMips64DoublewordSize>(offset)) {
        CHECK_ALIGNED(offset, kMips64WordSize);
        // Low word via lwc1, high word via lw + mthc1.
        Lwc1(reg, base, offset);
        Lw(TMP2, base, offset + kMips64WordSize);
        Mthc1(TMP2, reg);
      } else {
        Ldc1(reg, base, offset);
      }
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1878 
EmitLoad(ManagedRegister m_dst,GpuRegister src_register,int32_t src_offset,size_t size)1879 void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
1880                                size_t size) {
1881   Mips64ManagedRegister dst = m_dst.AsMips64();
1882   if (dst.IsNoRegister()) {
1883     CHECK_EQ(0u, size) << dst;
1884   } else if (dst.IsGpuRegister()) {
1885     if (size == 4) {
1886       LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
1887     } else if (size == 8) {
1888       CHECK_EQ(8u, size) << dst;
1889       LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
1890     } else {
1891       UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
1892     }
1893   } else if (dst.IsFpuRegister()) {
1894     if (size == 4) {
1895       CHECK_EQ(4u, size) << dst;
1896       LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
1897     } else if (size == 8) {
1898       CHECK_EQ(8u, size) << dst;
1899       LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
1900     } else {
1901       UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
1902     }
1903   }
1904 }
1905 
// Stores |reg| (byte/halfword/word/doubleword) to memory at |base| + |offset|.
// Offsets that do not fit a 16-bit immediate are first folded into AT.
void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
                                    int32_t offset) {
  if (!IsInt<16>(offset) ||
      (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
       !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
    // Fold the high part of the offset into AT (see LoadFromOffset).
    LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
    Daddu(AT, AT, base);
    base = AT;
    offset &= (kMips64DoublewordSize - 1);
  }

  switch (type) {
    case kStoreByte:
      Sb(reg, base, offset);
      break;
    case kStoreHalfword:
      Sh(reg, base, offset);
      break;
    case kStoreWord:
      CHECK_ALIGNED(offset, kMips64WordSize);
      Sw(reg, base, offset);
      break;
    case kStoreDoubleword:
      if (!IsAligned<kMips64DoublewordSize>(offset)) {
        CHECK_ALIGNED(offset, kMips64WordSize);
        // Split into two word stores; dsrl32 extracts the high 32 bits into TMP2.
        Sw(reg, base, offset);
        Dsrl32(TMP2, reg, 0);
        Sw(TMP2, base, offset + kMips64WordSize);
      } else {
        Sd(reg, base, offset);
      }
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1942 
// Stores FPU register |reg| (float or double) to memory at |base| + |offset|.
// Offsets that do not fit a 16-bit immediate are first folded into AT.
void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
                                       int32_t offset) {
  if (!IsInt<16>(offset) ||
      (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
       !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
    // Fold the high part of the offset into AT (see LoadFromOffset).
    LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
    Daddu(AT, AT, base);
    base = AT;
    offset &= (kMips64DoublewordSize - 1);
  }

  switch (type) {
    case kStoreWord:
      CHECK_ALIGNED(offset, kMips64WordSize);
      Swc1(reg, base, offset);
      break;
    case kStoreDoubleword:
      if (!IsAligned<kMips64DoublewordSize>(offset)) {
        CHECK_ALIGNED(offset, kMips64WordSize);
        // High word via mfhc1 + sw, low word via swc1.
        Mfhc1(TMP2, reg);
        Swc1(reg, base, offset);
        Sw(TMP2, base, offset + kMips64WordSize);
      } else {
        Sdc1(reg, base, offset);
      }
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1973 
// Maps a MIPS64 core register to its DWARF register number for CFI records.
static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// Size of a saved register / stack slot on MIPS64 (64-bit pointers).
constexpr size_t kFramePointerSize = 8;
1979 
// Emits the managed-method prologue: grows the stack frame, spills RA and the
// callee-save registers (recording CFI for each), stores the method pointer at
// SP + 0, and writes the entry spills into the caller's out area above the frame.
void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                 const std::vector<ManagedRegister>& callee_save_regs,
                                 const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK(!overwriting_);

  // Increase frame to required size.
  IncreaseFrameSize(frame_size);

  // Push callee saves and return address
  int stack_offset = frame_size - kFramePointerSize;
  StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
  cfi_.RelOffset(DWARFReg(RA), stack_offset);
  // Spill callee-saves downwards from just below RA.
  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
    stack_offset -= kFramePointerSize;
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
    cfi_.RelOffset(DWARFReg(reg), stack_offset);
  }

  // Write out Method*.
  StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0);

  // Write out entry spills.
  // Spills land above this frame (offset starts past frame_size).
  int32_t offset = frame_size + kFramePointerSize;
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
    ManagedRegisterSpill spill = entry_spills.at(i);
    int32_t size = spill.getSize();
    if (reg.IsNoRegister()) {
      // only increment stack offset.
      offset += size;
    } else if (reg.IsFpuRegister()) {
      StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
          reg.AsFpuRegister(), SP, offset);
      offset += size;
    } else if (reg.IsGpuRegister()) {
      StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
          reg.AsGpuRegister(), SP, offset);
      offset += size;
    }
  }
}
2023 
// Emits the managed-method epilogue: reloads callee-saves and RA (undoing the
// prologue's layout), pops the frame and returns via RA. CFI state is saved and
// restored so code emitted after the epilogue keeps correct unwind info.
void Mips64Assembler::RemoveFrame(size_t frame_size,
                                  const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK(!overwriting_);
  cfi_.RememberState();

  // Pop callee saves and return address
  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
    cfi_.Restore(DWARFReg(reg));
    stack_offset += kFramePointerSize;
  }
  LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
  cfi_.Restore(DWARFReg(RA));

  // Decrease frame to required size.
  DecreaseFrameSize(frame_size);

  // Then jump to the return address.
  Jr(RA);
  Nop();

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}
2052 
// Grows the stack frame by |adjust| bytes (SP -= adjust) and records the CFA change.
void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kFramePointerSize);
  DCHECK(!overwriting_);
  Daddiu64(SP, SP, static_cast<int32_t>(-adjust));
  cfi_.AdjustCFAOffset(adjust);
}
2059 
// Shrinks the stack frame by |adjust| bytes (SP += adjust) and records the CFA change.
void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kFramePointerSize);
  DCHECK(!overwriting_);
  Daddiu64(SP, SP, static_cast<int32_t>(adjust));
  cfi_.AdjustCFAOffset(-adjust);
}
2066 
// Spills |msrc| (GPU or FPU, 4 or 8 bytes) to the stack slot at SP + |dest|.
// A no-register source requires size 0.
void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister src = msrc.AsMips64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsGpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
    } else {
      // Unreachable: guarded by the CHECK above.
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  } else if (src.IsFpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
    } else {
      // Unreachable: guarded by the CHECK above.
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  }
}
2091 
StoreRef(FrameOffset dest,ManagedRegister msrc)2092 void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
2093   Mips64ManagedRegister src = msrc.AsMips64();
2094   CHECK(src.IsGpuRegister());
2095   StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
2096 }
2097 
StoreRawPtr(FrameOffset dest,ManagedRegister msrc)2098 void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
2099   Mips64ManagedRegister src = msrc.AsMips64();
2100   CHECK(src.IsGpuRegister());
2101   StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
2102 }
2103 
StoreImmediateToFrame(FrameOffset dest,uint32_t imm,ManagedRegister mscratch)2104 void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
2105                                             ManagedRegister mscratch) {
2106   Mips64ManagedRegister scratch = mscratch.AsMips64();
2107   CHECK(scratch.IsGpuRegister()) << scratch;
2108   LoadConst32(scratch.AsGpuRegister(), imm);
2109   StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
2110 }
2111 
// Computes SP + |fr_offs| into |mscratch| and stores it at |thr_offs| off the
// thread register (S1, as used by all *Thread64 helpers here).
void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
2120 
// Stores the current SP at |thr_offs| off the thread register (S1).
void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) {
  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
2124 
// Stores |msrc| at SP + |dest| and copies the doubleword at SP + |in_off| into
// the adjacent slot SP + |dest| + 8, using |mscratch| for the copy.
void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                    FrameOffset in_off, ManagedRegister mscratch) {
  Mips64ManagedRegister src = msrc.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
}
2133 
// Loads |size| bytes from the stack slot at SP + |src| into |mdest|.
void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  return EmitLoad(mdest, SP, src.Int32Value(), size);
}
2137 
// Loads |size| bytes from |src| off the thread register (S1) into |mdest|.
void Mips64Assembler::LoadFromThread64(ManagedRegister mdest,
                                       ThreadOffset<kMips64DoublewordSize> src,
                                       size_t size) {
  return EmitLoad(mdest, S1, src.Int32Value(), size);
}
2143 
// Loads a 32-bit reference from SP + |src| into |mdest|, zero-extended (lwu).
void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
}
2149 
// Loads the 32-bit reference field at |base| + |offs| into |mdest| (zero-extended),
// optionally undoing heap-reference poisoning when it is enabled.
void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
                              bool unpoison_reference) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
  if (kPoisonHeapReferences && unpoison_reference) {
    // TODO: review
    // Negate the 32-bit ref
    Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
    // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64
    Dext(dest.AsGpuRegister(), dest.AsGpuRegister(), 0, 32);
  }
}
2164 
// Loads the 64-bit raw pointer at |base| + |offs| into |mdest|.
void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                                 Offset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
2172 
// Loads the 64-bit raw pointer at |offs| off the thread register (S1) into |mdest|.
void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
                                             ThreadOffset<kMips64DoublewordSize> offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
}
2179 
// Intentionally unsupported: MIPS64 loads already produce correctly extended values.
void Mips64Assembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
                                 size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No sign extension necessary for MIPS64";
}
2184 
// Intentionally unsupported: MIPS64 loads already produce correctly extended values.
void Mips64Assembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
                                 size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No zero extension necessary for MIPS64";
}
2189 
// Register-to-register move between like-kind managed registers (GPU->GPU or
// FPU->FPU, with |size| 4/8 selecting mov.s/mov.d). No-op when dest == src.
void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  Mips64ManagedRegister src = msrc.AsMips64();
  if (!dest.Equals(src)) {
    if (dest.IsGpuRegister()) {
      CHECK(src.IsGpuRegister()) << src;
      Move(dest.AsGpuRegister(), src.AsGpuRegister());
    } else if (dest.IsFpuRegister()) {
      CHECK(src.IsFpuRegister()) << src;
      if (size == 4) {
        MovS(dest.AsFpuRegister(), src.AsFpuRegister());
      } else if (size == 8) {
        MovD(dest.AsFpuRegister(), src.AsFpuRegister());
      } else {
        UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
      }
    }
  }
}
2209 
CopyRef(FrameOffset dest,FrameOffset src,ManagedRegister mscratch)2210 void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
2211                               ManagedRegister mscratch) {
2212   Mips64ManagedRegister scratch = mscratch.AsMips64();
2213   CHECK(scratch.IsGpuRegister()) << scratch;
2214   LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
2215   StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
2216 }
2217 
// Copies a 64-bit value from |thr_offs| off the thread register (S1) into the
// stack slot at SP + |fr_offs|, via |mscratch|.
void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                             ThreadOffset<kMips64DoublewordSize> thr_offs,
                                             ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
2226 
// Copies a 64-bit value from the stack slot at SP + |fr_offs| to |thr_offs| off
// the thread register (S1), via |mscratch|.
void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
                S1, thr_offs.Int32Value());
}
2237 
Copy(FrameOffset dest,FrameOffset src,ManagedRegister mscratch,size_t size)2238 void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
2239                            ManagedRegister mscratch, size_t size) {
2240   Mips64ManagedRegister scratch = mscratch.AsMips64();
2241   CHECK(scratch.IsGpuRegister()) << scratch;
2242   CHECK(size == 4 || size == 8) << size;
2243   if (size == 4) {
2244     LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
2245     StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
2246   } else if (size == 8) {
2247     LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
2248     StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
2249   } else {
2250     UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
2251   }
2252 }
2253 
Copy(FrameOffset dest,ManagedRegister src_base,Offset src_offset,ManagedRegister mscratch,size_t size)2254 void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
2255                            ManagedRegister mscratch, size_t size) {
2256   GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
2257   CHECK(size == 4 || size == 8) << size;
2258   if (size == 4) {
2259     LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
2260                    src_offset.Int32Value());
2261     StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
2262   } else if (size == 8) {
2263     LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
2264                    src_offset.Int32Value());
2265     StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
2266   } else {
2267     UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
2268   }
2269 }
2270 
Copy(ManagedRegister dest_base,Offset dest_offset,FrameOffset src,ManagedRegister mscratch,size_t size)2271 void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
2272                            ManagedRegister mscratch, size_t size) {
2273   GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
2274   CHECK(size == 4 || size == 8) << size;
2275   if (size == 4) {
2276     LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
2277     StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
2278                   dest_offset.Int32Value());
2279   } else if (size == 8) {
2280     LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
2281     StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
2282                   dest_offset.Int32Value());
2283   } else {
2284     UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
2285   }
2286 }
2287 
// Copy from *(SP + src_base) + src_offset to the frame slot `dest`.
// Deliberately unimplemented on MIPS64: aborts at runtime if ever reached.
void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
                           FrameOffset src_base ATTRIBUTE_UNUSED,
                           Offset src_offset ATTRIBUTE_UNUSED,
                           ManagedRegister mscratch ATTRIBUTE_UNUSED,
                           size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
2295 
Copy(ManagedRegister dest,Offset dest_offset,ManagedRegister src,Offset src_offset,ManagedRegister mscratch,size_t size)2296 void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
2297                            ManagedRegister src, Offset src_offset,
2298                            ManagedRegister mscratch, size_t size) {
2299   GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
2300   CHECK(size == 4 || size == 8) << size;
2301   if (size == 4) {
2302     LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
2303     StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
2304   } else if (size == 8) {
2305     LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
2306                    src_offset.Int32Value());
2307     StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
2308                   dest_offset.Int32Value());
2309   } else {
2310     UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
2311   }
2312 }
2313 
// Copy from *(SP + src) + src_offset to *(SP + dest) + dest_offset.
// Deliberately unimplemented on MIPS64: aborts at runtime if ever reached.
void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
                           Offset dest_offset ATTRIBUTE_UNUSED,
                           FrameOffset src ATTRIBUTE_UNUSED,
                           Offset src_offset ATTRIBUTE_UNUSED,
                           ManagedRegister mscratch ATTRIBUTE_UNUSED,
                           size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
2322 
// Emit a memory barrier. Deliberately unimplemented on MIPS64: aborts at
// runtime if ever reached.
void Mips64Assembler::MemoryBarrier(ManagedRegister mreg ATTRIBUTE_UNUSED) {
  // TODO: sync?  (A SYNC instruction would presumably be the implementation —
  // confirm against the MIPS64 memory-ordering requirements before enabling.)
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
2327 
CreateHandleScopeEntry(ManagedRegister mout_reg,FrameOffset handle_scope_offset,ManagedRegister min_reg,bool null_allowed)2328 void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
2329                                              FrameOffset handle_scope_offset,
2330                                              ManagedRegister min_reg,
2331                                              bool null_allowed) {
2332   Mips64ManagedRegister out_reg = mout_reg.AsMips64();
2333   Mips64ManagedRegister in_reg = min_reg.AsMips64();
2334   CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
2335   CHECK(out_reg.IsGpuRegister()) << out_reg;
2336   if (null_allowed) {
2337     Mips64Label null_arg;
2338     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
2339     // the address in the handle scope holding the reference.
2340     // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
2341     if (in_reg.IsNoRegister()) {
2342       LoadFromOffset(kLoadUnsignedWord, out_reg.AsGpuRegister(),
2343                      SP, handle_scope_offset.Int32Value());
2344       in_reg = out_reg;
2345     }
2346     if (!out_reg.Equals(in_reg)) {
2347       LoadConst32(out_reg.AsGpuRegister(), 0);
2348     }
2349     Beqzc(in_reg.AsGpuRegister(), &null_arg);
2350     Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
2351     Bind(&null_arg);
2352   } else {
2353     Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
2354   }
2355 }
2356 
CreateHandleScopeEntry(FrameOffset out_off,FrameOffset handle_scope_offset,ManagedRegister mscratch,bool null_allowed)2357 void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
2358                                              FrameOffset handle_scope_offset,
2359                                              ManagedRegister mscratch,
2360                                              bool null_allowed) {
2361   Mips64ManagedRegister scratch = mscratch.AsMips64();
2362   CHECK(scratch.IsGpuRegister()) << scratch;
2363   if (null_allowed) {
2364     Mips64Label null_arg;
2365     LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
2366                    handle_scope_offset.Int32Value());
2367     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
2368     // the address in the handle scope holding the reference.
2369     // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
2370     Beqzc(scratch.AsGpuRegister(), &null_arg);
2371     Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
2372     Bind(&null_arg);
2373   } else {
2374     Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
2375   }
2376   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
2377 }
2378 
2379 // Given a handle scope entry, load the associated reference.
LoadReferenceFromHandleScope(ManagedRegister mout_reg,ManagedRegister min_reg)2380 void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
2381                                                    ManagedRegister min_reg) {
2382   Mips64ManagedRegister out_reg = mout_reg.AsMips64();
2383   Mips64ManagedRegister in_reg = min_reg.AsMips64();
2384   CHECK(out_reg.IsGpuRegister()) << out_reg;
2385   CHECK(in_reg.IsGpuRegister()) << in_reg;
2386   Mips64Label null_arg;
2387   if (!out_reg.Equals(in_reg)) {
2388     LoadConst32(out_reg.AsGpuRegister(), 0);
2389   }
2390   Beqzc(in_reg.AsGpuRegister(), &null_arg);
2391   LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
2392                  in_reg.AsGpuRegister(), 0);
2393   Bind(&null_arg);
2394 }
2395 
// Verify that the register holds a valid object reference. Currently a no-op
// on MIPS64.
void Mips64Assembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
                                   bool could_be_null ATTRIBUTE_UNUSED) {
  // TODO: not validating references
}
2400 
// Verify that the frame slot holds a valid object reference. Currently a
// no-op on MIPS64.
void Mips64Assembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
                                   bool could_be_null ATTRIBUTE_UNUSED) {
  // TODO: not validating references
}
2405 
Call(ManagedRegister mbase,Offset offset,ManagedRegister mscratch)2406 void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
2407   Mips64ManagedRegister base = mbase.AsMips64();
2408   Mips64ManagedRegister scratch = mscratch.AsMips64();
2409   CHECK(base.IsGpuRegister()) << base;
2410   CHECK(scratch.IsGpuRegister()) << scratch;
2411   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
2412                  base.AsGpuRegister(), offset.Int32Value());
2413   Jalr(scratch.AsGpuRegister());
2414   Nop();
2415   // TODO: place reference map on call
2416 }
2417 
Call(FrameOffset base,Offset offset,ManagedRegister mscratch)2418 void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
2419   Mips64ManagedRegister scratch = mscratch.AsMips64();
2420   CHECK(scratch.IsGpuRegister()) << scratch;
2421   // Call *(*(SP + base) + offset)
2422   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
2423                  SP, base.Int32Value());
2424   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
2425                  scratch.AsGpuRegister(), offset.Int32Value());
2426   Jalr(scratch.AsGpuRegister());
2427   Nop();
2428   // TODO: place reference map on call
2429 }
2430 
// Call an entrypoint at the given Thread offset. Deliberately unimplemented
// on MIPS64: aborts at runtime if ever reached.
void Mips64Assembler::CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset ATTRIBUTE_UNUSED,
                                       ManagedRegister mscratch ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
2435 
// Copy the current Thread* (kept in S1 on MIPS64) into the given register.
void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
  Move(tr.AsMips64().AsGpuRegister(), S1);
}
2439 
// Store the current Thread* (kept in S1 on MIPS64) into the frame slot at
// `offset`. No scratch register is needed, hence the unused parameter.
void Mips64Assembler::GetCurrentThread(FrameOffset offset,
                                       ManagedRegister mscratch ATTRIBUTE_UNUSED) {
  StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}
2444 
ExceptionPoll(ManagedRegister mscratch,size_t stack_adjust)2445 void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
2446   Mips64ManagedRegister scratch = mscratch.AsMips64();
2447   exception_blocks_.emplace_back(scratch, stack_adjust);
2448   LoadFromOffset(kLoadDoubleword,
2449                  scratch.AsGpuRegister(),
2450                  S1,
2451                  Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value());
2452   Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
2453 }
2454 
// Emit the out-of-line slow path for ExceptionPoll(): deliver the pending
// exception. Control never returns from the entrypoint call below.
void Mips64Assembler::EmitExceptionPoll(Mips64ExceptionSlowPath* exception) {
  Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving A0 as this call won't return.
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
  // The scratch register was loaded with the exception object in ExceptionPoll().
  Move(A0, exception->scratch_.AsGpuRegister());
  // Set up call to Thread::Current()->pDeliverException
  LoadFromOffset(kLoadDoubleword,
                 T9,
                 S1,
                 QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pDeliverException).Int32Value());
  Jr(T9);
  Nop();

  // Call never returns
  Break();
}
2475 
2476 }  // namespace mips64
2477 }  // namespace art
2478